author    Jon Streets <jonathan.streets@10gen.com>  2021-02-25 13:33:12 -0500
committer Jon Streets <jonathan.streets@10gen.com>  2021-02-25 13:33:12 -0500
commit    eb1cd218736accb4e959cc85f0d6600ac715d298 (patch)
tree      5c1af24bcf9ed39c69b11b64b14bbf7d425bed80
parent    d5900efb035676f79f237d0c25b8a1694f7098f6 (diff)
download  mongo-eb1cd218736accb4e959cc85f0d6600ac715d298.tar.gz
Vendored mongo-tools v3.6 TOOLS-2803
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/Godeps | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common.yml | 15
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca-ia.pem | 77
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca.pem | 78
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ia-ca.pem | 77
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/server.pem | 76
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-client.pem | 88
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-server.pem | 89
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/tlsgo/tlsgo.go | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options.go | 115
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options_test.go | 208
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/testutil/ssl_integration.go | 4
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/set_goenv.sh | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/buildlogger.py | 492
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/cleanbb.py | 105
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/pipe.py | 87
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/smoke.py | 1398
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/utils.py | 230
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/authTestsKey | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/ca.pem | 17
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client.pem | 101
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client_revoked.pem | 34
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/cluster-cert.pem | 101
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/command_line/test_parsed_options.js | 202
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl.pem | 10
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_client_revoked.pem | 12
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_expired.pem | 10
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_good.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fts.js | 18
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fun.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/geo_near_random.js | 99
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/grid.js | 171
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key1 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key2 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameCN.pem | 101
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameSAN.pem | 100
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockkrb5.conf | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockservice.keytab | bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockuser.keytab | bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/network.js | 37
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/parallelTester.js | 259
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/password_protected.pem | 51
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/server.pem | 34
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/slow_weekly_util.js | 20
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/smoke.pem | 50
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/test_background_ops.js | 340
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/testconfig | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/trace_missing_docs.js | 90
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/use_extended_timeout.js | 12
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/misc/biginsert.js | 18
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/replsets/rslib.js | 115
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csv1.js | 42
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport1.js | 65
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport2.js | 31
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvimport1.js | 40
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/a.tsv | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/csvimport1.csv | 8
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpauth.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpfilename1.js | 14
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore1.js | 23
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore10.js | 63
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore3.js | 60
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore4.js | 42
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore6.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore7.js | 66
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore8.js | 105
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore9.js | 79
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js | 107
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth.js | 35
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth2.js | 96
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth3.js | 199
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpsecondary.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport1.js | 66
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport3.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport4.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport5.js | 82
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport6.js | 26
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_bigarray.js | 62
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_date.js | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/files1.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/restorewithauth.js | 113
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/stat1.js | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool1.js | 44
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool_replset.js | 69
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tsv1.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/buildlogger.py | 491
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/cleanbb.py | 105
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/pipe.py | 87
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/smoke.py | 1481
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/utils.py | 235
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/analyze_plan.js | 80
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/authTestsKey | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/badSAN.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/ca.pem | 102
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client.pem | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client_revoked.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/cluster_cert.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/command_line/test_parsed_options.js | 214
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_auth.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_dur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_httpinterface.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_ipv6.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_journal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_jsonp.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_moveparanoia.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noauth.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noautosplit.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nodur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nohttpinterface.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noindexbuildretry.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nojournal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nomoveparanoia.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noobjcheck.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noprealloc.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noscripting.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nounixsocket.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_objcheck.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_dur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_journal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nodur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nojournal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl.pem | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_client_revoked.pem | 41
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_expired.pem | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_first.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_last.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_good.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/expired.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/fts.js | 18
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/geo_near_random.js | 101
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/host_ipaddr.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key1 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key2 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameCN.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameSAN.pem | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockkrb5.conf | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockservice.keytab | bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockuser.keytab | bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/not_yet_valid.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/parallelTester.js | 259
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/password_protected.pem | 51
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/server.pem | 58
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers.js | 961
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers_misc.js | 357
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/slow_weekly_util.js | 20
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/smoke.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/test_background_ops.js | 340
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/testconfig | 6
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/trace_missing_docs.js | 90
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/replsets/rslib.js | 115
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csv1.js | 43
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport1.js | 65
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport2.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvimport1.js | 41
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/a.tsv | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/csvimport1.csv | 8
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpauth.js | 29
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpfilename1.js | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore1.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore10.js | 64
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore3.js | 61
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore4.js | 43
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore6.js | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore7.js | 66
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore8.js | 107
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore9.js | 79
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestoreWithNoOptions.js | 114
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth.js | 117
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth2.js | 98
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth3.js | 200
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_excludecollections.js | 112
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpsecondary.js | 39
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport1.js | 67
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport3.js | 28
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport4.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport5.js | 82
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport6.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_bigarray.js | 59
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_date.js | 50
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_minkey_maxkey.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/files1.js | 28
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/restorewithauth.js | 117
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/stat1.js | 18
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool1.js | 44
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool_replset.js | 69
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tsv1.js | 33
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/.gitignore | 12
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/__init__.py | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/buildlogger.py | 491
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/cleanbb.py | 105
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmoke.py | 216
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/__init__.py | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py | 36
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml | 19
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml | 10
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py | 36
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/native_cert_ssl.yml | 14
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml | 23
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml | 21
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/__init__.py | 7
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/config.py | 165
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/__init__.py | 5
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/network.py | 114
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/pipe.py | 87
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/process.py | 234
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/programs.py | 311
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/errors.py | 52
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py | 14
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py | 284
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/config.py | 161
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/flush.py | 97
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py | 50
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py | 178
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py | 37
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/parser.py | 368
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/selector.py | 291
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py | 9
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/executor.py | 307
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py | 128
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py | 209
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py | 211
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py | 347
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py | 151
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py | 704
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/job.py | 195
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/report.py | 330
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/suite.py | 140
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/summary.py | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py | 407
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py | 132
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py | 88
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py | 202
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py | 78
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/queue.py | 52
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/timer.py | 125
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py | 291
-rwxr-xr-x  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/smoke.py | 1485
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/utils.py | 235
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types.js | 33
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types_json.js | 29
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bad_files.js | 41
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_broken_pipe.js | 18
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_options.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/deep_nested.js | 8
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/output_file.js | 71
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/check_version.js | 47
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/topology_helper.js | 187
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/archive_targets.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/auth_28.config.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/gzip_targets.js | 36
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.config.yml | 7
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.linux.sh | 5
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28.config.js | 39
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js | 59
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_26.config.js | 19
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_28.config.js | 21
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_28.config.js | 39
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_auth_28.config.js | 58
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_single_28.config.js | 39
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/sharding_28.config.js | 40
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/ssl_28.config.js | 26
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/standard_dump_targets.config.js | 30
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/bad_options.js | 47
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/basic_data.js | 60
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/data_types.js | 70
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_broken_pipe.js | 46
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_views.js | 80
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/field_file.js | 60
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_csv.js | 173
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_json.js | 92
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/force_table_scan.js | 126
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/json_array.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/limit.js | 61
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/namespace_validation.js | 25
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/nested_fields_csv.js | 65
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/no_data.js | 21
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/pretty.js | 33
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/query.js | 198
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/slave_ok.js | 63
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/sort_and_skip.js | 69
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/stdout.js | 42
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/testdata/simple_field_file | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/type_case.js | 115
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_db.js | 61
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_delete.js | 47
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_get.js | 81
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_host.js | 59
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_invalid.js | 37
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_list.js | 96
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_local.js | 102
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_port.js | 52
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_prefix.js | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_put.js | 127
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_replace.js | 79
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_search.js | 110
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_type.js | 63
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_version.js | 29
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js | 62
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files1.txt | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files2.txt | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files3.txt | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/util/mongofiles_common.js | 10
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/all_primaries_down_error_code.js | 65
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/boolean_type.js | 57
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/collections.js | 77
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/decimal128.js | 44
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/drop.js | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/fields.js | 107
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_document_validation.js | 110
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_types.js | 75
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern.js | 75
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern_mongos.js | 80
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode.js | 147
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode_upsert_id_subdoc.js | 86
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/no_primary_error_code.js | 65
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/options.js | 123
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/parse_grace.js | 113
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/replset.js | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/stoponerror.js | 40
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_header.csv | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_noheader.csv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/extrafields.csv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/fieldfile | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/parse_grace.csv | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_header.tsv | 4
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.csv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.tsv | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.csv | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv | 2
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typedfieldfile | 5
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/type_case.js | 98
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/typed_fields.js | 114
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/types.js | 117
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/.eslintrc.yml | 3
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/analyze_plan.js | 76
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/authTestsKey | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/badSAN.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/ca.pem | 102
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client.pem | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client_revoked.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/cluster_cert.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js | 213
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_auth.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_dur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_journal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl.pem | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_client_revoked.pem | 41
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_expired.pem | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_good.journal | bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/expired.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/extended_assert.js | 61
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/fts.js | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/geo_near_random.js | 100
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/host_ipaddr.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key1 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key2 | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameCN.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameSAN.pem | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockkrb5.conf | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockservice.keytab | bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockuser.keytab | bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mongostat.js | 114
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/not_yet_valid.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/parallelTester.js | 268
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/password_protected.pem | 51
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/server.pem | 58
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers.js | 1096
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers_misc.js | 376
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/slow_weekly_util.js | 25
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/smoke.pem | 48
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/test_background_ops.js | 334
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/testconfig | 6
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trace_missing_docs.js | 102
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-ca.pem | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-client.pem | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-server.pem | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/wc_framework.js | 118
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/15k_collections.js | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/26_to_28.js | 67
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/28_to_26.js | 68
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/archive_stdout.js | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/bad_options.js | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_collection_bson.js | 43
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_db.js | 29
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/collation.js | 73
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js | 138
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_collection.js | 91
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_db.js | 86
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_authenticated_user.js | 111
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_nonexistent_db.js | 58
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_one_collection.js | 92
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_with_data.js | 77
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/duplicate_keys.js | 75
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/empty_users_and_roles.js | 33
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/extended_json_metadata.js | 42
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/index_version_roundtrip.js | 107
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/indexes.js | 98
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_dump_target.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_metadata.js | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/keep_index_version.js | 90
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/large_bulk.js | 54
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_bson.js | 20
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_metadata.js | 22
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/missing_dump.js | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/multiple_dbs.js | 82
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/namespaces.js | 152
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_index_restore.js | 77
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_options_restore.js | 131
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/norestore_profile.js | 58
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/objcheck_valid_bson.js | 46
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js | 78
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_conflict.js | 33
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js | 67
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js | 19
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_noop.js | 37
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js | 40
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js | 71
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js | 70
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/ordered_partial_index.js | 45
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/partial_restore.js | 83
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js | 28
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/restore_document_validation.js | 180
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/sharded_fullrestore.js | 45
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/stop_on_error.js | 50
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/symlinks.js | 46
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/blankdb/README | 1
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir | bin 0 -> 525 bytes
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles.js | 87
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_26_to_28.js | 103
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js | 159
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js | 144
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern.js | 69
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern_mongos.js | 74
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/ssl/ssl_with_system_ca.js | 43
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_auth.js | 30
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_broken_pipe.js | 34
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_custom_headers.js | 132
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover.js | 60
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover_shard.js | 14
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_header.js | 27
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js | 45
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_rowcount.js | 60
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_json.js | 49
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_reports.js | 152
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_sharded.js | 47
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_stress.js | 56
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_validation.js | 46
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/util/mongotop_common.js | 25
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/.travis.yml | 10
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/LICENSE | 23
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/Makefile | 44
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/README.md | 59
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/appveyor.yml | 32
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/bench_test.go | 110
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors.go | 288
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors_test.go | 251
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/example_test.go | 205
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/format_test.go | 560
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113.go | 38
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113_test.go | 178
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/json_test.go | 51
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack.go | 177
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack_test.go | 250
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/.travis.yml | 16
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE | 201
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/NOTICE | 13
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/README.md | 133
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/apic.go | 740
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode.go | 815
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode_test.go | 1367
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/emitterc.go | 1685
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode.go | 390
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode_test.go | 630
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/example_embedded_test.go | 41
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/go.mod | 5
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/limit_test.go | 128
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/parserc.go | 1095
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/readerc.go | 412
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/resolve.go | 258
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/scannerc.go | 2711
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/sorter.go | 113
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/suite_test.go | 12
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/writerc.go | 26
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yaml.go | 466
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlh.go | 739
-rw-r--r--  src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173
527 files changed, 52625 insertions, 79 deletions
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/Godeps b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/Godeps
index b7dd17d007d..67a9dfd0bd5 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/Godeps
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/Godeps
@@ -1,5 +1,7 @@
gopkg.in/mgo.v2 39b4000d99037e917f3a3b9d2dcab667a9ef284a github.com/10gen/mgo
gopkg.in/tomb.v2 14b3d72120e8d10ea6e6b7f87f7175734b1faab8
+gopkg.in/yaml.v2 0b1645d91e851e735d3e23330303ce81f70adbe3
+github.com/pkg/errors 614d223910a179a466c1767a985424175c39b465
github.com/jtolds/gls 8ddce2a84170772b95dd5d576c48d517b22cac63
github.com/jacobsa/oglematchers 3ecefc49db07722beca986d9bb71ddd026b133f0
github.com/smartystreets/assertions 287b4346dc4e71a038c346375a9d572453bc469b
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common.yml
index 3f06088718f..d1f4e245381 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common.yml
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common.yml
@@ -207,7 +207,10 @@ mongo_tools_variables:
mongod_args: ""
mongod_port: 33333
ssl: &mongod_ssl_startup_args
- mongod_args: "--sslMode requireSSL --sslCAFile common/db/openssl/testdata/ca.pem --sslPEMKeyFile common/db/openssl/testdata/server.pem"
+ mongod_args: "--sslMode requireSSL --sslCAFile common/db/openssl/testdata/ca-ia.pem --sslPEMKeyFile common/db/openssl/testdata/test-server.pem --bind_ip localhost"
+ mongod_port: 33333
+ ssl: &mongod_ssl_startup_args_windows
+ mongod_args: "--sslMode requireSSL --sslCAFile common/db/openssl/testdata/ia-ca.pem --sslPEMKeyFile common/db/openssl/testdata/test-server.pem --bind_ip localhost"
mongod_port: 33333
# Set storage engine as mmapv1 for 32 bit variants because WiredTiger requires 64 bit support.
win32: &mongod_win32_startup_args
@@ -219,7 +222,7 @@ mongo_tools_variables:
mongo_args: &mongo_default_startup_args_string "--port 33333"
mongod_port: 33333
ssl: &mongo_ssl_startup_args
- mongo_args: "--port 33333 --ssl --sslCAFile ./common/db/openssl/testdata/ca.pem --sslPEMKeyFile ./common/db/openssl/testdata/server.pem --sslAllowInvalidCertificates"
+ mongo_args: "--port 33333 --ssl --sslCAFile ./common/db/openssl/testdata/ca-ia.pem --sslPEMKeyFile ./common/db/openssl/testdata/test-client.pem --sslAllowInvalidCertificates"
mongod_port: 33333
functions:
@@ -1612,7 +1615,7 @@ buildvariants:
run_on:
- debian71-test
expansions:
- gorootvars: 'PATH="/opt/go1.8/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.8/go'
+ gorootvars: 'PATH="/opt/go1.11/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.11/go'
build_tags: "sasl ssl"
tasks:
- name: dist
@@ -1832,7 +1835,7 @@ buildvariants:
run_on:
- windows-64-vs2013-compile
expansions:
- <<: *mongod_ssl_startup_args
+ <<: *mongod_ssl_startup_args_windows
<<: *mongo_ssl_startup_args
mongo_os: "windows-64"
mongo_target: "windows_x86_64-2008plus-ssl"
@@ -1947,7 +1950,7 @@ buildvariants:
stepback: false
batchtime: 10080 # weekly
expansions:
- gorootvars: 'PATH="/opt/go1.8/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.8/go CC=/opt/mongodbtoolchain/v2/bin/s390x-mongodb-linux-gcc'
+ gorootvars: 'PATH="/opt/go1.11/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.11/go CC=/opt/mongodbtoolchain/v2/bin/s390x-mongodb-linux-gcc'
build_tags: "sasl ssl"
tasks:
- name: dist
@@ -1982,7 +1985,7 @@ buildvariants:
stepback: false
batchtime: 10080 # weekly
expansions:
- gorootvars: 'PATH="/opt/go1.8/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.8/go CC=/opt/mongodbtoolchain/v2/bin/s390x-mongodb-linux-gcc'
+ gorootvars: 'PATH="/opt/go1.11/go/bin:/opt/mongodbtoolchain/v2/bin/:$PATH" GOROOT=/opt/go1.11/go CC=/opt/mongodbtoolchain/v2/bin/s390x-mongodb-linux-gcc'
build_tags: "sasl ssl"
tasks:
- name: dist
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca-ia.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca-ia.pem
new file mode 100644
index 00000000000..9bfd078c172
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca-ia.pem
@@ -0,0 +1,77 @@
+-----BEGIN CERTIFICATE-----
+MIIGuTCCBKGgAwIBAgIBATANBgkqhkiG9w0BAQsFADCBmjELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5h
+bWUxHzAdBgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRl
+c3RDZXJ0aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcN
+MjAwNTE0MjIwNDA4WhcNMjIwNTE0MjIwNDA4WjCBmjELMAkGA1UEBhMCVVMxCzAJ
+BgNVBAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5hbWUx
+HzAdBgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRlc3RD
+ZXJ0aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvai9QUGngMN2KStKblKKL2wbmeje0
+ciFDqgKE1NeYnVXSb+2Gfh0GsfMlPAHLnrJJavxP+hlEtE7xAUzMXRpG+1UlF/9F
+GoOVXbiD4kcJStnpAem1Rekdnno/BjHnXHi6ICcSzHIj79hZP1duwtBVpbsZL4ex
+NxGJUbQI/HXP4Ii/a6PgVmr214I0Gvzv5vuqqyUzrTqXia5MPjs74Dx6vlxec1RP
+KTweLCOQDv3qyYaTmp9zTflOUXQ5PZYCudhasegZBz/M3dZ/DKEHdmLbgZIEeMO/
+Q0aNeS2nv/6vrM/eNlxC9Pojl4ilJX+O91JfcrKBQHbsArc0YTeFI2MVtFVr9ZMc
+1tDskKOciT4eBkyQ5Fxj6PXqY6drFGTU34v3agbMQ2MuOoEzuU0sa29aIHxU4f1J
+mIHzBx2jQBMhNQoyV8/f/ww85hRI409vFTpKsksQUCphfoAzZkDgE1K1HTmm9EoE
+ALpHYXed/9HqIhhPDvi5rM+YbO+oA8RMWIBfYYigXBbggpkRXJzj1Rszpog9LWvV
+REZZt7/zMKFUVdA6ZX1xPq+YD2LI0GJQPKKFOFZlU8l4hgm28mXNu4cRWALFfQ2p
+oyzqCXDrL8mg6PQMm31SeJYJVoHBxmK3GHm9Xmx/ATJdGzLKOWasA9AQ0ZB4N2EO
+SEyiFmYiR3JfpQIDAQABo4IBBjCCAQIwHQYDVR0OBBYEFA1nxNTcT9PtIMze0tTW
+om9ezkwiMA8GA1UdEwEB/wQFMAMBAf8wgc8GA1UdIwSBxzCBxIAUS5yyG3e0ez1O
+UUuyICi2AEQan7ChgaCkgZ0wgZoxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOWTEk
+MCIGA1UEBwwbVGVzdENlcnRpZmljYXRlTG9jYWxpdHlOYW1lMR8wHQYDVQQKDBZU
+ZXN0Q2VydGlmaWNhdGVPcmdOYW1lMSMwIQYDVQQLDBpUZXN0Q2VydGlmaWNhdGVP
+cmdVbml0TmFtZTESMBAGA1UEAwwJbG9jYWxob3N0ggkAmupop0NK8BQwDQYJKoZI
+hvcNAQELBQADggIBALncufwQDoui6iUBMFwP7VM87EHmkYLfnKG2Jdmcb8zKZ2G/
+umS1glYpPtyFISzo3TZH2kY1x0pwFjuS9zjt+Y7OgmT0Ktqg8Z4iLD7Qf3bPe/2t
+P1gbPyiLQsVoa1StaYYZ9pn/AO4Cm/LenwdtpQLDrR8GIS4GEpYLjSRMor83QlbV
+rb7zJoUf13Ycu996rAoXyeDQ7CFiT9eHh9/7YewKW3c6eaWwSeJcMfOBqZSZiD0h
+e95s2MeMcFhBQtWNnGWdja/PqP/7EgQ2h4ts1mmiPfchvBN06xOHivq43C0J/M7P
+VsGJ2GoHZjRhaZ73J4Uq2sQqqQot+aJeKSFjrxEEajd1NAbRTF3HBturErOS2XT0
+WGMT2mzRvU7ATQyDtp3sDkrJfEhGIyOIqosnqvlgaA3aEHF83FRbtaUi1TElURc5
+u8ter2eSQbjPRADpY/ox26Oca//DeSm3eEa6zTWTYxD2MPE5EPHtoljVxpDipxkv
+XDOwXi2sR6oQMdz11W2jYibVOY4thFsm+FtfIa5jg7ycG9ASuvAupkU5auKU8W+Z
+iDDJI3gHyiD3AKvaoBHIpSTVVQlKryTbo2IIbeerrimxfPjy2mSBsec463KUjpoN
+50WdDp2BcCQqN+I0GEp4zcGLO4DqzOBaeWxy7jCQRVKfv8V2zKZ2X1w2u3Nk
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGwTCCBKmgAwIBAgIJAJrqaKdDSvAUMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxv
+Y2FsaXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEG
+A1UECwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2Fs
+aG9zdDAeFw0yMDA1MTQyMjAzMzNaFw0yNTA1MTQyMjAzMzNaMIGaMQswCQYDVQQG
+EwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxvY2Fs
+aXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEGA1UE
+CwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2FsaG9z
+dDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMwgKFmX/uEl/PeSeqzT
+HeEyoDr0F7u6sbkHFciH7Jfo/6X4eoCannF9HV6I1enHBy5tDl6+otaSYq+3bPNt
+QgHIFjXwGO+lPV6eVv14eA5LOl8Qx3oAQsZAf3Xqdtbr6M8BdkIvATcZMkhGrGNh
+X2IuRi2HlyRNlYv50kzI+MBvW0tBDx3XuSBeRcdTEqKr+TIuuV75tGekfto/YMfW
+nTbivL7t51B97nhc8aHMP/TfXwkNbEIvTuNAFZRhB23irfkV9X+MtawytZOLxL+r
+I0dQj3RE/ltZK04kNXT4MUt0QiWqOi+Y7ATOAyaP3ZN+IRtYCNE2R5o3a4HCGS0n
+i0Lm3KPZzHt/iYfyX8l/kG8msL6mhdnKL99skEZO3aFGfm80MGWfMxx0NcGx62/J
+APV9mrbusMk34BwNjjpVsb+Gx3Y7gGFxYvSj7HYyoldk0bH79noUEeAKKzMJFxS9
+MvX5/t+7tB9Ulbvz/nrd1sk/JuVIo8+HczmThrIYbci+sA89NxIHzDywkMEqj+gr
+8Qx5W0gWhT19MJiwdCqQQFl4OnBxqglKaFoAWIVIgKkYZvhAqd7cUWMMuvSL0yvD
+8+cdT/jomTLoYM439O7xDvMxbuyCh+CPzxPl6UkPCzsG/1SOgSJFxX8/CRWgQOTp
+iJ1a34+Z6sn/a1jGYOAwnHNrAgMBAAGjggEGMIIBAjAdBgNVHQ4EFgQUS5yyG3e0
+ez1OUUuyICi2AEQan7AwDwYDVR0TAQH/BAUwAwEB/zCBzwYDVR0jBIHHMIHEgBRL
+nLIbd7R7PU5RS7IgKLYARBqfsKGBoKSBnTCBmjELMAkGA1UEBhMCVVMxCzAJBgNV
+BAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5hbWUxHzAd
+BgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRlc3RDZXJ0
+aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3SCCQCa6minQ0rw
+FDANBgkqhkiG9w0BAQsFAAOCAgEANMKU0hTWibbZJ2YgMWZfxyXJL/RmsUYr30lC
+4FpzGQ1RZX8Gf4ewKvouyk+14z1x7wx5U78Gy0ahcP/Ek0Od2+hW6t36ImtJIk1Y
+bOP3il02as1TRwNeZ3P6XjKZ61MoW4QBg/ekJLZ84yIY6ZcJYGR9N47nM3Mqgt/Q
+myMa862VPraoiwXx52DTITNG8G4w0YKB/bwQuR8xfFZYzvEaFpc2XDFiY333jmLq
+iEAnMBFjVdgGVTbaGtdaiswnRwZEqJzxZbwNpjfeqL1/G2geMWO8BHXRbtRydizG
+iCmPqO+wUjbsazgydK9KXtpp/qwDsSPOuNE16W8nEv64rPNpGmATi7e3zdSHAtsz
+nq5ggZsVEy23Pc8HtLOaLruMCx/R0EVq/1vY7OVfT/fjCDNkUbp0cWfsFLbTGReg
+qucgU19sGElq+yOvD7yF/Cet5d+jFB7bavot2/rHrNywvDYo7Y46FnOnyeCogeD1
+ozyHl6tb7DepnRIh0lP6HJUdjr61tBgcQ0yZDInkde5bAvKLPwOsWCKI754We5Np
+p/Db5UB8tAU1dSnC1tsP4ktClzbAnZfN3iYZPVKbg+s868cl9ujutWg7EuHEDlWJ
+Sv/UL2kVgWJs3AbKy+qhQOiK7+Qm0xEVvhNXaCXGBH903etaX8yV9T2qHLLg57ph
+z51DrF0=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca.pem
index b1b6f2628da..ce5b8635fcb 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca.pem
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ca.pem
@@ -1,34 +1,48 @@
------BEGIN PRIVATE KEY-----
-MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMbN8D5Au+xWdY+s
-GpUuSFSbHGzYfHmw0yajA9J8PiwDePRMl71OMMsByNsykjzXEr0BBOn4PNO6KW7K
-HdDicRavuC/iFucVpILUiJoLOUCPKb/EyAHUk0r2fdr3Ypd2ZXkD1EXmM9WTQnyW
-PEWqr1T7MmM9PhsD0r8ZbQVu8R49AgMBAAECgYBbC+mguQjXfektOUabV6zsgnUM
-LEElgiPRqAqSFTBr+9MjHwjHO84Ayvpv2MM8dcsxIAxeEr/Yv4NGJ+5rwajESir6
-/7UzqzhXmj6ylqTfbMRJCRsqnwvSfNwpsxtMSYieCxtdYqTLaJLAItBjuZPAYL8W
-9Tf/NMc4AjLLHx7PyQJBAOyOcIS/i23td6ZX+QtppXL1fF/JMiKooE9m/npAT5K/
-hQEaAatdLyQ669id181KY9F0JR1TEbzb0A1yo73soRsCQQDXJSG4ID8lfR9SXnEE
-y/RqYv0eKneER+V7e1Cy7bYHvJxZK0sWXYzIZhTl8PABh3PCoLdxjY0IM7UNWlwU
-dAuHAkAOUaTv9CQ9eDVY5VRW44M3TTLFHYmiXXCuvb5Dqibm7B7h7TASrmZPHB3w
-k8VfUNRv9kbU2pVlSCz0026j7XHnAkEAk/qZP8EGTe3K3mfRCsCSA57EhLwm6phd
-ElrWPcvc2WN0kqyBgAembqwwEZxwKE0XZTYQFw2KhKq0DFQrY3IR/wJAIAnLtabL
-aF819WI/VYlMmwb3GAO2w5KQilGhYl7tv1BghH+Qmg7HZEcIRmSwPKEQveT3YpCH
-nCu38jgPXhhqdg==
------END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIIC3DCCAkWgAwIBAgIJAKwksc/otf2iMA0GCSqGSIb3DQEBCwUAMIGGMQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxHTAbBgNVBAoMFE1vbmdvREIgS2VybmVsIFRvb2xzMRkwFwYDVQQLDBBUb29s
-cyBUZXN0aW5nIENBMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTUwNjA1MTU1MTQ1
-WhcNMzUwNjA0MTU1MTQ1WjCBhjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZ
-b3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MR0wGwYDVQQKDBRNb25nb0RCIEtl
-cm5lbCBUb29sczEZMBcGA1UECwwQVG9vbHMgVGVzdGluZyBDQTESMBAGA1UEAwwJ
-bG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGzfA+QLvsVnWP
-rBqVLkhUmxxs2Hx5sNMmowPSfD4sA3j0TJe9TjDLAcjbMpI81xK9AQTp+DzTuilu
-yh3Q4nEWr7gv4hbnFaSC1IiaCzlAjym/xMgB1JNK9n3a92KXdmV5A9RF5jPVk0J8
-ljxFqq9U+zJjPT4bA9K/GW0FbvEePQIDAQABo1AwTjAdBgNVHQ4EFgQU+QOiCHTF
-8At8aMOBvHF6wWZpcZUwHwYDVR0jBBgwFoAU+QOiCHTF8At8aMOBvHF6wWZpcZUw
-DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQCbbIAjdV+M8RR3ZF1WMBYD
-8aMr55kgtnCWn4mTCDdombCYgtbaPq5sy8Hb/2wLQ9Zl4UuFL5wKWcx3kOLo3cw/
-boj8jnUDnwrsBd2nN7sYdjF+M7FLp6U1AxrE5ejijtg2KCl+p4b7jJgJBSFIQD45
-7CAJVjIrajY4LlJj3x+caQ==
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ia-ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ia-ca.pem
new file mode 100644
index 00000000000..6075fac376a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/ia-ca.pem
@@ -0,0 +1,77 @@
+-----BEGIN CERTIFICATE-----
+MIIGwTCCBKmgAwIBAgIJAJrqaKdDSvAUMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxv
+Y2FsaXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEG
+A1UECwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2Fs
+aG9zdDAeFw0yMDA1MTQyMjAzMzNaFw0yNTA1MTQyMjAzMzNaMIGaMQswCQYDVQQG
+EwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxvY2Fs
+aXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEGA1UE
+CwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2FsaG9z
+dDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMwgKFmX/uEl/PeSeqzT
+HeEyoDr0F7u6sbkHFciH7Jfo/6X4eoCannF9HV6I1enHBy5tDl6+otaSYq+3bPNt
+QgHIFjXwGO+lPV6eVv14eA5LOl8Qx3oAQsZAf3Xqdtbr6M8BdkIvATcZMkhGrGNh
+X2IuRi2HlyRNlYv50kzI+MBvW0tBDx3XuSBeRcdTEqKr+TIuuV75tGekfto/YMfW
+nTbivL7t51B97nhc8aHMP/TfXwkNbEIvTuNAFZRhB23irfkV9X+MtawytZOLxL+r
+I0dQj3RE/ltZK04kNXT4MUt0QiWqOi+Y7ATOAyaP3ZN+IRtYCNE2R5o3a4HCGS0n
+i0Lm3KPZzHt/iYfyX8l/kG8msL6mhdnKL99skEZO3aFGfm80MGWfMxx0NcGx62/J
+APV9mrbusMk34BwNjjpVsb+Gx3Y7gGFxYvSj7HYyoldk0bH79noUEeAKKzMJFxS9
+MvX5/t+7tB9Ulbvz/nrd1sk/JuVIo8+HczmThrIYbci+sA89NxIHzDywkMEqj+gr
+8Qx5W0gWhT19MJiwdCqQQFl4OnBxqglKaFoAWIVIgKkYZvhAqd7cUWMMuvSL0yvD
+8+cdT/jomTLoYM439O7xDvMxbuyCh+CPzxPl6UkPCzsG/1SOgSJFxX8/CRWgQOTp
+iJ1a34+Z6sn/a1jGYOAwnHNrAgMBAAGjggEGMIIBAjAdBgNVHQ4EFgQUS5yyG3e0
+ez1OUUuyICi2AEQan7AwDwYDVR0TAQH/BAUwAwEB/zCBzwYDVR0jBIHHMIHEgBRL
+nLIbd7R7PU5RS7IgKLYARBqfsKGBoKSBnTCBmjELMAkGA1UEBhMCVVMxCzAJBgNV
+BAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5hbWUxHzAd
+BgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRlc3RDZXJ0
+aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3SCCQCa6minQ0rw
+FDANBgkqhkiG9w0BAQsFAAOCAgEANMKU0hTWibbZJ2YgMWZfxyXJL/RmsUYr30lC
+4FpzGQ1RZX8Gf4ewKvouyk+14z1x7wx5U78Gy0ahcP/Ek0Od2+hW6t36ImtJIk1Y
+bOP3il02as1TRwNeZ3P6XjKZ61MoW4QBg/ekJLZ84yIY6ZcJYGR9N47nM3Mqgt/Q
+myMa862VPraoiwXx52DTITNG8G4w0YKB/bwQuR8xfFZYzvEaFpc2XDFiY333jmLq
+iEAnMBFjVdgGVTbaGtdaiswnRwZEqJzxZbwNpjfeqL1/G2geMWO8BHXRbtRydizG
+iCmPqO+wUjbsazgydK9KXtpp/qwDsSPOuNE16W8nEv64rPNpGmATi7e3zdSHAtsz
+nq5ggZsVEy23Pc8HtLOaLruMCx/R0EVq/1vY7OVfT/fjCDNkUbp0cWfsFLbTGReg
+qucgU19sGElq+yOvD7yF/Cet5d+jFB7bavot2/rHrNywvDYo7Y46FnOnyeCogeD1
+ozyHl6tb7DepnRIh0lP6HJUdjr61tBgcQ0yZDInkde5bAvKLPwOsWCKI754We5Np
+p/Db5UB8tAU1dSnC1tsP4ktClzbAnZfN3iYZPVKbg+s868cl9ujutWg7EuHEDlWJ
+Sv/UL2kVgWJs3AbKy+qhQOiK7+Qm0xEVvhNXaCXGBH903etaX8yV9T2qHLLg57ph
+z51DrF0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIGuTCCBKGgAwIBAgIBATANBgkqhkiG9w0BAQsFADCBmjELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5h
+bWUxHzAdBgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRl
+c3RDZXJ0aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcN
+MjAwNTE0MjIwNDA4WhcNMjIwNTE0MjIwNDA4WjCBmjELMAkGA1UEBhMCVVMxCzAJ
+BgNVBAgMAk5ZMSQwIgYDVQQHDBtUZXN0Q2VydGlmaWNhdGVMb2NhbGl0eU5hbWUx
+HzAdBgNVBAoMFlRlc3RDZXJ0aWZpY2F0ZU9yZ05hbWUxIzAhBgNVBAsMGlRlc3RD
+ZXJ0aWZpY2F0ZU9yZ1VuaXROYW1lMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvai9QUGngMN2KStKblKKL2wbmeje0
+ciFDqgKE1NeYnVXSb+2Gfh0GsfMlPAHLnrJJavxP+hlEtE7xAUzMXRpG+1UlF/9F
+GoOVXbiD4kcJStnpAem1Rekdnno/BjHnXHi6ICcSzHIj79hZP1duwtBVpbsZL4ex
+NxGJUbQI/HXP4Ii/a6PgVmr214I0Gvzv5vuqqyUzrTqXia5MPjs74Dx6vlxec1RP
+KTweLCOQDv3qyYaTmp9zTflOUXQ5PZYCudhasegZBz/M3dZ/DKEHdmLbgZIEeMO/
+Q0aNeS2nv/6vrM/eNlxC9Pojl4ilJX+O91JfcrKBQHbsArc0YTeFI2MVtFVr9ZMc
+1tDskKOciT4eBkyQ5Fxj6PXqY6drFGTU34v3agbMQ2MuOoEzuU0sa29aIHxU4f1J
+mIHzBx2jQBMhNQoyV8/f/ww85hRI409vFTpKsksQUCphfoAzZkDgE1K1HTmm9EoE
+ALpHYXed/9HqIhhPDvi5rM+YbO+oA8RMWIBfYYigXBbggpkRXJzj1Rszpog9LWvV
+REZZt7/zMKFUVdA6ZX1xPq+YD2LI0GJQPKKFOFZlU8l4hgm28mXNu4cRWALFfQ2p
+oyzqCXDrL8mg6PQMm31SeJYJVoHBxmK3GHm9Xmx/ATJdGzLKOWasA9AQ0ZB4N2EO
+SEyiFmYiR3JfpQIDAQABo4IBBjCCAQIwHQYDVR0OBBYEFA1nxNTcT9PtIMze0tTW
+om9ezkwiMA8GA1UdEwEB/wQFMAMBAf8wgc8GA1UdIwSBxzCBxIAUS5yyG3e0ez1O
+UUuyICi2AEQan7ChgaCkgZ0wgZoxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOWTEk
+MCIGA1UEBwwbVGVzdENlcnRpZmljYXRlTG9jYWxpdHlOYW1lMR8wHQYDVQQKDBZU
+ZXN0Q2VydGlmaWNhdGVPcmdOYW1lMSMwIQYDVQQLDBpUZXN0Q2VydGlmaWNhdGVP
+cmdVbml0TmFtZTESMBAGA1UEAwwJbG9jYWxob3N0ggkAmupop0NK8BQwDQYJKoZI
+hvcNAQELBQADggIBALncufwQDoui6iUBMFwP7VM87EHmkYLfnKG2Jdmcb8zKZ2G/
+umS1glYpPtyFISzo3TZH2kY1x0pwFjuS9zjt+Y7OgmT0Ktqg8Z4iLD7Qf3bPe/2t
+P1gbPyiLQsVoa1StaYYZ9pn/AO4Cm/LenwdtpQLDrR8GIS4GEpYLjSRMor83QlbV
+rb7zJoUf13Ycu996rAoXyeDQ7CFiT9eHh9/7YewKW3c6eaWwSeJcMfOBqZSZiD0h
+e95s2MeMcFhBQtWNnGWdja/PqP/7EgQ2h4ts1mmiPfchvBN06xOHivq43C0J/M7P
+VsGJ2GoHZjRhaZ73J4Uq2sQqqQot+aJeKSFjrxEEajd1NAbRTF3HBturErOS2XT0
+WGMT2mzRvU7ATQyDtp3sDkrJfEhGIyOIqosnqvlgaA3aEHF83FRbtaUi1TElURc5
+u8ter2eSQbjPRADpY/ox26Oca//DeSm3eEa6zTWTYxD2MPE5EPHtoljVxpDipxkv
+XDOwXi2sR6oQMdz11W2jYibVOY4thFsm+FtfIa5jg7ycG9ASuvAupkU5auKU8W+Z
+iDDJI3gHyiD3AKvaoBHIpSTVVQlKryTbo2IIbeerrimxfPjy2mSBsec463KUjpoN
+50WdDp2BcCQqN+I0GEp4zcGLO4DqzOBaeWxy7jCQRVKfv8V2zKZ2X1w2u3Nk
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/server.pem
index d2aaa930ff5..5c56ff42b0f 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/server.pem
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/server.pem
@@ -1,32 +1,48 @@
------BEGIN PRIVATE KEY-----
-MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBALOkdwU9Qx4FRn+z
-coBkeYYpVRg0pknPMDo4Q50TqZPfVhroTynx2Or+cjl5csd5hMKxWQpdzGq8JzH9
-2BCLcDz/51vG3tPrpLIB50ABqa0wRGGDOO+XN0h+VkdqJvKReWOsNRoMT3s0Lh78
-BqvRUomYXnbc1RBaxwWa+UoLCFgnAgMBAAECgYBd9XmjLeW6//tds5gB+4tsVpYB
-cRhAprOM3/zNXYlmpHu+2x78y1gvoSJRWWplVvPPeT8fIuxWL0844JJwJN5wyCwN
-nnrA28l6+Tcde+NlzCxwED+QDjAH20BRxCs0BLvnx3WAXRDmUbWAjOl/qPn9H6m1
-nmUQ7H/f6dxZ0vVMQQJBAOl3xeVLyZZ828P/p3PvYkaeIxxVK1QDGOWi/3vC0DrY
-WK8xAoopjj0RHHZ1fL5bG31G3OR9Vc/rfk4a5XPIlRECQQDE+teCTiwV5Wwzdpg3
-r440qOLCmpMXwJr/Jlh+C4c8ebnIQ9P5sSe4wQNHyeEZ2t7SGvPfjr7glpPhAkXy
-JTm3AkEAvNPgvVoUy6Bk5xuJRl2hMNiKMUo5ZxOyOVkiJeklHdMJt3h+Q1zk7ENA
-sBbKM/PgQezkj/FHTIl9eJKMbp8W4QJBAL4aXHyslw12wisUrKkpa7PUviwT5BvL
-TYsrZcIXvCeYTr1BAMX8vBopZNIWuoEqY1sgmfZKnFrB1+wTNpAQbxcCQQCHbtvQ
-1U2p5Pz5XYyaoK2OEZhPMuLnOBMpzjSxRLxKyhb4k+ssIA0IeAiT4RIECtHJ8DJX
-4aZK/qg9WmBH+zbO
------END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIICbzCCAdgCAQEwDQYJKoZIhvcNAQEFBQAwgYYxCzAJBgNVBAYTAlVTMREwDwYD
-VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEdMBsGA1UECgwU
-TW9uZ29EQiBLZXJuZWwgVG9vbHMxGTAXBgNVBAsMEFRvb2xzIFRlc3RpbmcgQ0Ex
-EjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA2MDUxNTUxNDVaFw0zNTA2MDQxNTUx
-NDVaMHkxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
-TmV3IFlvcmsgQ2l0eTEUMBIGA1UECgwLTW9uZ29EQiBJbmMxFTATBgNVBAsMDEtl
-cm5lbCBUb29sczESMBAGA1UEAwwJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUA
-A4GNADCBiQKBgQCzpHcFPUMeBUZ/s3KAZHmGKVUYNKZJzzA6OEOdE6mT31Ya6E8p
-8djq/nI5eXLHeYTCsVkKXcxqvCcx/dgQi3A8/+dbxt7T66SyAedAAamtMERhgzjv
-lzdIflZHaibykXljrDUaDE97NC4e/Aar0VKJmF523NUQWscFmvlKCwhYJwIDAQAB
-MA0GCSqGSIb3DQEBBQUAA4GBACJiTnC3nksZsmMyD88+DuV8IA1DHSby4X/qtDYT
-eSuNbxRKnihXkm2KE+MGn7YeKg4a7FaYiH3ejk0ZBlY3TZXK3I1uh/zIhC9aMnSL
-z0z4OLcqp46F8PpYF7ARtXXWQuOEWe6k+VKy5XP1NX60sEJ0KwGBQjUw3Ys41JE8
-iigw
+MIIDfjCCAmagAwIBAgIBBzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBsMQ8wDQYDVQQD
+EwZzZXJ2ZXIxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEWMBQG
+A1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNVBAYT
+AlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp76KJeDczBqjSPJj
+5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMqwbX0D7hC2r3kAgccMyFoNIudPqIXfXVd
+1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4bhke6R8JRC3O5aMKIAbaiQUAI1Nd8LxIt
+LGvH+ia/DFza1whgB8ym/uzVQB6igOifJ1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEb
+R9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSzU/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHm
+r4de8jhW8wivmjTIvte33jlLibQ5nYIHrlpDLEwlzvDGaIio+OfWcgs2WuPk98MU
+tht0IQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAN
+BgkqhkiG9w0BAQUFAAOCAQEANoYxvVFsIol09BQA0fwryAye/Z4dYItvKhmwB9VS
+t99DsmJcyx0P5meB3Ed8SnwkD0NGCm5TkUY/YLacPP9uJ4SkbPkNZ1fRISyShCCn
+SGgQUJWHbCbcIEj+vssFb91c5RFJbvnenDkQokRvD2VJWspwioeLzuwtARUoMH3Y
+qg0k0Mn7Bx1bW1Y6xQJHeVlnZtzxfeueoFO55ZRkZ0ceAD/q7q1ohTXi0vMydYgu
+1CB6VkDuibGlv56NdjbttPJm2iQoPaez8tZGpBo76N/Z1ydan0ow2pVjDXVOR84Y
+2HSZgbHOGBiycNw2W3vfw7uK0OmiPRTFpJCmewDjYwZ/6w==
-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAp76KJeDczBqjSPJj5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMq
+wbX0D7hC2r3kAgccMyFoNIudPqIXfXVd1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4b
+hke6R8JRC3O5aMKIAbaiQUAI1Nd8LxItLGvH+ia/DFza1whgB8ym/uzVQB6igOif
+J1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEbR9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSz
+U/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHmr4de8jhW8wivmjTIvte33jlLibQ5nYIH
+rlpDLEwlzvDGaIio+OfWcgs2WuPk98MUtht0IQIDAQABAoIBACgi1ilECXCouwMc
+RDzm7Jb7Rk+Q9MVJ79YlG08Q+oRaNjvAzE03PSN5wj1WjDTUALJXPvi7oy82V4qE
+R6Q6Kvbv46aUJpYzKFEk2dw7ACpSLa1LNfjGNtMusnecA/QF/8bxLReRu8s5mBQn
+NDnZvCqllLbfjNlAvsF+/UIn5sqFZpAZPMtPwkTAeh5ge8H9JvrG8y8aXsiFGAhV
+Z7tMZyn8wPCUrRi14NLvVB4hxM66G/tuTp8r9AmeTU+PV+qbCnKXd+v0IS52hvX9
+z75OPfAc66nm4bbPCapb6Yx7WaewPXXU0HDxeaT0BeQ/YfoNa5OT+ZOX1KndSfHa
+VhtmEsECgYEA3m86yYMsNOo+dkhqctNVRw2N+8gTO28GmWxNV9AC+fy1epW9+FNR
+yTQXpBkRrR7qrd5mF7WBc7vAIiSfVs021RMofzn5B1x7jzkH34VZtlviNdE3TZhx
+lPinqo0Yy3UEksgsCBJFIofuCmeTLk4ZtqoiZnXr35RYibaZoQdUT4kCgYEAwQ6Y
+xsKFYFks1+HYl29kR0qUkXFlVbKOhQIlj/dPm0JjZ0xYkUxmzoXD68HrOWgz7hc2
+hZaQTgWf+8cRaZNfh7oL+Iglczc2UXuwuUYguYssD/G6/ZPY15PhItgCghaU5Ewy
+hMwIJ81NENY2EQTgk/Z1KZitXdVJfHl/IPMQgdkCgYASdqkqkPjaa5dDuj8byO8L
+NtTSUYlHJbAmjBbfcyTMG230/vkF4+SmDuznci1FcYuJYyyWSzqzoKISM3gGfIJQ
+rYZvCSDiu4qGGPXOWANaX8YnMXalukGzW/CO96dXPB9lD7iX8uxKMX5Q3sgYz+LS
+hszUNHWf2XB//ehCtZkKAQKBgQCxL2luepeZHx82H9T+38BkYgHLHw0HQzLkxlyd
+LjlE4QCEjSB4cmukvkZbuYXfEVEgAvQKVW6p/SWhGkpT4Gt8EXftKV9dyF21GVXQ
+JZnhUOcm1xBsrWYGLXYi2agrpvgONBTlprERfq5tdnz2z8giZL+RZswu45Nnh8bz
+AcKzuQKBgQCGOQvKvNL5XKKmws/KRkfJbXgsyRT2ubO6pVL9jGQG5wntkeIRaEpT
+oxFtWMdPx3b3cxtgSP2ojllEiISk87SFIN1zEhHZy/JpTF0GlU1qg3VIaA78M1p2
+ZdpUsuqJzYmc3dDbQMepIaqdW4xMoTtZFyenUJyoezz6eWy/NlZ/XQ==
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-client.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-client.pem
new file mode 100644
index 00000000000..c868b0e5e86
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-client.pem
@@ -0,0 +1,88 @@
+-----BEGIN CERTIFICATE-----
+MIIGYTCCBEmgAwIBAgIJAL+8WDUncZFZMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxv
+Y2FsaXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEG
+A1UECwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2Fs
+aG9zdDAeFw0yMDA1MTQyMjU2MzRaFw0yMTA1MTQyMjU2MzRaMIGMMQswCQYDVQQG
+EwJVUzELMAkGA1UECAwCTlkxJjAkBgNVBAcMHVRlc3RDbGllbnRDZXJ0aWZpY2F0
+ZUxvY2FsaXR5MSEwHwYDVQQKDBhUZXN0Q2xpZW50Q2VydGlmaWNhdGVPcmcxJTAj
+BgNVBAsMHFRlc3RDbGllbnRDZXJ0aWZpY2F0ZU9yZ1VuaXQwggIiMA0GCSqGSIb3
+DQEBAQUAA4ICDwAwggIKAoICAQDCxxLZEQlJ8WKABPr7d74ra/Mqh6dPwk0Zos5A
+r3JIlq6/OJ91k0GjgewQn8N/Jhr3BYhlscXhuB1C0gI7agbi58fU8TMOvcqsDM3G
+T9XehOedls5HMBmxkz6l6KXcCRHamhQv09nyBw3VOkoby/AoMdwCZgJIAOGfw8Lp
+dAO71X5llzDPKVQtpA8NQF7uZq6Qv72Papf5OgxN5LwOH1IXW2Yd+Y/j4p0Z7j+x
+31zJ/1Don31CQZK2b7qDojh3X0DH5HO883fw8Q1j9PQc7pPJ4OoXK170cycwln/o
+fsKQsGlaXzJcz0F3XO3xlpgA6ScfJ4F/92kB34IlutFbxacNjoLa1QeolwNYcoO2
+cdL/4XHpgQgwblOcBnSkJoakYeVBwRVaRUxmG8b9bBzLXtOy078naFED6Q6LzP7+
+cT9ZSPdOxk728JMpjIsEFAi5Fpv/nVuGx9/+xIOF1w/Q+Y/nvvThEGc7X08WtLeJ
+tKFTE7OWX+UVeuR4g8UEYEdAGQzstBaDjVtLGky57QY7HZaNbWYUPu6As6HYKazK
+xBXDFs2Cpg8zIroTwjBh7+F56bq06xGZnXX/iXUULKwgwOaAN9cAd5dLX+PFPO9z
+R2Mt5UWYtUL4295JGabDPqI0nDWmv8Omtbat4cHHajAePUn28jy5+32zbt2Bqe/T
+R9/tdQIDAQABo4G1MIGyMB0GA1UdDgQWBBQMaRN+ZH/PO1MrfSYYsOoSRXuR7jAJ
+BgNVHRMEAjAAMA4GA1UdDwEB/wQEAwIFoDBXBglghkgBhvhCAQ0EShZIT3BlblNT
+TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUgZm9yIFRFU1RJTkcgb25seS4gIE5PVCBG
+T1IgUFJPRFVDVElPTiBVU0UuMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD
+AjANBgkqhkiG9w0BAQsFAAOCAgEAgCSb//bj5EOp6v8mvmn688e34DHRevkrBfUt
+SrVyO7vsuZvb0xa8nAKnvpH6h85n5S6VDBKznQndYOPYX1w7QeJK6ZHJYNv9wRs3
+9bO7hT9LiRIas+DwEAheW9KRC2U1xtHNlr5UnkbWqfK0n4u/b1oJKA2wz0wl+B/k
+tbZgEYaa8RIMc0uOel4hzaYUygXKNL8JXoDGzXKqcUO/a7tCW55CQQW8o5NT8sTX
+wwGyNl3vjxxjL3DDJd8HdE3pqnh3Q8sYihipF6K6KGfAj6N4ZH1lZfwP+n7C5vHf
+w5XXhrJ0il+psjSbnGWtZx5GOC1axx1xUxfOMGY//iiwnzSqXfghBX2eMHix1RuB
+4vmLDI6l/t2LfI84Xm+BYrOed6jkb2pfGsK9WK8jzeEX6zkZSDBeHU7ztD7/Jc3p
+Ggl9vRji4Gdqf1rXfiy6P3ke8rVFEoWT73Ocqa+pPhVUhVt4VPnG7dxw3hIA+WOX
+GUOkdG5BLBByhixgTv/dK6B5ka+Y4qfFHRB+DvlMnmDRu+q3fhpAsG2ED6xapLOa
+eDRJYVtFPlgtGzT+b38tnbWHDzk7OCjJdRGF9x+gBZuLsxfhJ4wKcFrbwie+aypj
+5QrzX7X9PG36YCcQPxvzHDwH1OGJaqcd/ZUwHxCapPGamC2jSM5ATzhgTNTFDxVh
+On1Mo4o=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEAwscS2REJSfFigAT6+3e+K2vzKoenT8JNGaLOQK9ySJauvzif
+dZNBo4HsEJ/DfyYa9wWIZbHF4bgdQtICO2oG4ufH1PEzDr3KrAzNxk/V3oTnnZbO
+RzAZsZM+peil3AkR2poUL9PZ8gcN1TpKG8vwKDHcAmYCSADhn8PC6XQDu9V+ZZcw
+zylULaQPDUBe7maukL+9j2qX+ToMTeS8Dh9SF1tmHfmP4+KdGe4/sd9cyf9Q6J99
+QkGStm+6g6I4d19Ax+RzvPN38PENY/T0HO6TyeDqFyte9HMnMJZ/6H7CkLBpWl8y
+XM9Bd1zt8ZaYAOknHyeBf/dpAd+CJbrRW8WnDY6C2tUHqJcDWHKDtnHS/+Fx6YEI
+MG5TnAZ0pCaGpGHlQcEVWkVMZhvG/Wwcy17TstO/J2hRA+kOi8z+/nE/WUj3TsZO
+9vCTKYyLBBQIuRab/51bhsff/sSDhdcP0PmP57704RBnO19PFrS3ibShUxOzll/l
+FXrkeIPFBGBHQBkM7LQWg41bSxpMue0GOx2WjW1mFD7ugLOh2CmsysQVwxbNgqYP
+MyK6E8IwYe/heem6tOsRmZ11/4l1FCysIMDmgDfXAHeXS1/jxTzvc0djLeVFmLVC
++NveSRmmwz6iNJw1pr/DprW2reHBx2owHj1J9vI8uft9s27dganv00ff7XUCAwEA
+AQKCAgEAr3HRsGSAEXK0XswdptU9Hq+l4PE/68AA4s6bKfje4DOaQ/Qk7isuPLGS
+iYbe+XDBaTA6pB91NtQ4xy+TgyEGAuz0OwLoEqcyH7eIwtC2vDDK0FA+jjDtc95F
+1/uvKnug8V9IP6x77F4eP6q5hkA0moAOxLpN472nfdk/gDlYA6FSsP92n6OCh3n3
+dOA+ok8rfoa3oK9LJNr8qk0Wd0IPF6rkWKuqyIIfMHRCO14OnoSzxFWk5kFafao2
+PsuoQR4zo9pLxi8oIod+oELMgCMGTBmjwir25Qm0izeFD07ewqWkwBpvqtmlBndW
+fP5A/NEGxDE5nKKmm2FQQdv9KMToquMNsVrF8n4pwVExgeZlvmKCMsgIf20x0QyR
+LqLNGwY/KmOMjI9rq5dBwPqE0FUmY12yU+f2znJ+cgsiw50D5M+0HCzfYzooH4qK
+KOuNuVyVFFBsRRWUDFF/8twtsCp8W2/Tf1xJV9iImD4tqbzmsyGLkGAMxR5PT6e6
+Ce7ibNmqFmR73lsPBPaKhYMkSt4WP6m9dkvXk71cMzUK6irPt6q48eaB59CpsCO/
+FEgqLzkptEsJGzBYGadLaMBT8C1z8af58q55IEZ8lg+ZXxKh4PflUEuZFrYbuJqw
+9Cut6Ojrtar6wpwqbFoKpu/NTD/EkLB1qzbA0RNcusRzk7iw9wECggEBAOP9bEAG
+/IsHVJ717w4dCrV02BzXlxl5eUgf0q39CGd9jic94tt3N34dY9l3k28yp3WK+Wx0
+ZUCkm0izG3SpQVSbMv9MrfHH7OIIRTtXnpu6cP5JBeoNpc2RJrhDZDcR0CW6GigQ
+NvUiHYGCuvQc9rzp+XGNlngmiTGZUf+GiqvBVzHVrCOX2uFNdPKxTxM9rnQqO1FG
+wdLsMK/4W3y7o2UuL0Gpe8cEcb8+oy0n8/IUSZeMXaxb26HMPoj5d51447bKfuaU
+P3He7UlXhLwIHJ3vB8GDj/GMRr2SiciCanym+lOh8zsjwokl7cN5m0SGWLcwRqN+
+gZfN0koMTSSWa70CggEBANq1FMJ8/TXx0UQNFsSBhAllpQAn0u6SVCdEoyxM+atm
+RsHQCqsN/wn8V3+tiE+zHgPfhYoFqyRIDzaZDKdqwQhoi+arEP53la1HXuU4zSOi
+hBXkuCjA9UR3m9CzvT8xxsfB8dLlhLOVkdHvoUsCgSWz2iYJWFCKCvm7E3HxDfsF
+XXN22KHN2J/mIlRjFd6yzhzgQfBENowg0Y187eItz15tADNcC03lKKqVoo74JZw6
+2N3IlL/thC5FySI84CfQ+eF62mnck9L40e52HKJRguQE96O9UXdfKdjl2EQfWnOQ
+WEwkjm5o1ZviB62iGDMCnmHglSZ47u8LLt3MCTlviBkCggEBAMYe+I3fRGCwmxmU
+EAxMsnyjMBZIRvcNN5a0iMlHhKEiW4DAZ75dqCyA778QCV/tPdaIUV2s/Dwjjwmr
+E2Xam3ohcDyo7tOX78MwerEp4Bvl9cETxf7xoy6zP1mKvrCYWzAdvBMqnAeDgO6F
+o171OHlMPccuyh5ZJomieb46tZCx78rPqso3cS6ut70a1eObi1lJRcjIWzCmrVyN
+zzrieAg6DTjbRkPZGdueXACmonewp475nIFmRcMhkQpI92gxoHi9GW/YThXvKna8
+hg08Y1nfWPcSE+lNBEBC8lb69fyiwhAhQRQYbDnwauCecY4nzTpLymbTTVyUsHRG
+olb0HGUCggEAEFNMBWiHDivKzyWa+XghfwITWvueByjtFzFMbNYiHOU5iuYYpcQZ
+sYkgjutJLYnVR7y7nx3lNIQcfprWEvkpB5OmPcWvMw+CILkfIEUpKuvKqEe2Z3Rf
+2oiDHQUNr1Xr3KVhg7iWO0GC7QTSKWM6eLIAKq6v5PMo9pM8VFkGDiLMYv4QFaPj
+86ueDGaJD2KReB5VTbYJzFP20HTu3N9Gh2njhfHs9heQ/YrF454qUIsj/Qon8Fuu
+bjXPWbrpkI9M18Pv4c8RScJBCWfPo14qQxML2TEcj6iO2x3tEsi0W8P+k1B3QU6a
+A1TP9eoPnF8pq2y0NXb5fuZ6zRuZ5KuqoQKCAQBzFS0ziD9A2WNcSwkjomuZtlDi
+roXDETgBQnX8pWCo1YO8p+OrC/Av2p69pHvhb7v28kB+oRvjbhbQ9BTmwuN9OsLC
+0gXsXuIBeJn+W7QuyyagRxUHrnfnhzVvmHUDRogdtwjw6ffpzdZ3CySZklAw2Qoq
+C9j4YZYjPd7tQccoSSpb8tNS27etoalTx2Dohqdj4yanUpD8bPUNaRl4VE7vi2aj
+2YPNDX0LxfPpgYwXAdDc8v9ZMQdee6nvWQ8gS16YjY3MCQdf/clpfkAoDPXFZ7H5
+huPOQXfp5IZbaiJ4N+GmNi4ZpfvYTThSjBb/ulprGIA3oo0x/gmpwcAgidub
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-server.pem
new file mode 100644
index 00000000000..fda97c23bff
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/openssl/testdata/test-server.pem
@@ -0,0 +1,89 @@
+-----BEGIN CERTIFICATE-----
+MIIGmTCCBIGgAwIBAgIJAL+8WDUncZFYMA0GCSqGSIb3DQEBCwUAMIGaMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTlkxJDAiBgNVBAcMG1Rlc3RDZXJ0aWZpY2F0ZUxv
+Y2FsaXR5TmFtZTEfMB0GA1UECgwWVGVzdENlcnRpZmljYXRlT3JnTmFtZTEjMCEG
+A1UECwwaVGVzdENlcnRpZmljYXRlT3JnVW5pdE5hbWUxEjAQBgNVBAMMCWxvY2Fs
+aG9zdDAeFw0yMDA1MTQyMjA0MjBaFw0yMTA1MTQyMjA0MjBaMIGuMQswCQYDVQQG
+EwJVUzELMAkGA1UECAwCTlkxJjAkBgNVBAcMHVRlc3RTZXJ2ZXJDZXJ0aWZpY2F0
+ZUxvY2FsaXR5MSEwHwYDVQQKDBhUZXN0U2VydmVyQ2VydGlmaWNhdGVPcmcxJTAj
+BgNVBAsMHFRlc3RTZXJ2ZXJDZXJ0aWZpY2F0ZU9yZ1VuaXQxIDAeBgNVBAMMF0h1
+YW5zLU1hY0Jvb2stUHJvLmxvY2FsMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEA5VGWSyzDu6A2t8qx8SuqnFqI0hto7Udak9zTeSDy9SjbtRvzlPuruKFF
+aq1atVJAuzIxzMaV0LLy70EH0LapBEmS+kFzQHwVSo06JaZwGEmWkrD13MW97Xva
+nargF4a8o0O3ZUL73/9SyeF6o7J1eJd9hVRY+YVez2HTbAr9AfObRGmhBKky3NdR
+SNuIT2aTS9eWsiJ7dexr26k5XIMoDQ7agvw+YvmrhAjsyeL8PkLY4U/8Rnb4PYtY
+/7VHkPNvN7Cd+A8GmWZAQGvEeX2VfYgjXUP+8+UrvOVpErtSGzzNNt3q17/qan2p
+mrmHl+lL4epy2f1if1aVT4CiEeqMa81yYiNpbeA6RIDd39bH5IsJOqy+vBqdwILq
+Pz0iDxm18DxUmcPcvLEPDiGOWRKAqvDmNZpIzTbbK9BzWRzaKdAxkIZVT406I70j
+Fp8QKcmfhN1LUg1YV30MnJjjzvVH3wTbM/a7kUSf2uPM1Eb5Ag4z9Q0s7zKjalTB
+qQDMUREPWMA0gufblfyFAYMhHDrqQdzy/JsCTkjtCngUgS/lwzunpA03gHRnq3DF
+IfVn9tTZzopJyCyeku7BrNGBW01DdkKdfMpKgiavIXRaZrg6KqrsngKPQjxijIqZ
+ixJSgHBQVd6fw040bAJsHtLQrpvbF+r6zKVKMVngxyOvfHe07acCAwEAAaOByzCB
+yDAdBgNVHQ4EFgQUh+CUXhE6z6rdlr4/zGgmIqrlYzgwCQYDVR0TBAIwADAOBgNV
+HQ8BAf8EBAMCBaAwVwYJYIZIAYb4QgENBEoWSE9wZW5TU0wgR2VuZXJhdGVkIENl
+cnRpZmljYXRlIGZvciBURVNUSU5HIG9ubHkuICBOT1QgRk9SIFBST0RVQ1RJT04g
+VVNFLjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwFAYDVR0RBA0wC4IJ
+bG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4ICAQAtyk+ncTEqkeCb/gvJTHZdqw1h
+e4J18kemOvtlypWeRfRTWgUKpaI6tMCQUG5XsWIxuawxchncdqJVJkAG45zeI7wQ
+kcIOA/Wk/RhCsnJ1hZufPn+p+CZPG7Y++ySKBjNZ53kkdyVytdO9AoK6knH0NQ5m
+3epsjBtyJAUczUR8pu/WaFmufxlxprqQdsRiH0QK6wpNfcGFUQ1Wg6sijlrfb42e
+IsBOsLIcZ0sCVG8QW3EAp99hiytuAbxpdAqRyH7/JvE2wPrhvw3tuyBCkehdJAqk
+/dfkJpwV9Rz/e/+R9Akw1D5WHZIBL+jGr/dMSKQaSMApw/s4F5ZZWOdK6S47rJNO
+5jyg6WdHpcoE5i0b/6jA6JtNUDJvryhzU99nx+9pnhob9GmdNL7kh10qqkc7CX6Z
+mmENaN94Yt8jbNSj8b7YcVshC3OH1PW7spvSarOiIPVUsORO6zF9HRELdjBpICyS
+hnmIcgbZsjzNRyeNFVw9YOygpREurfZToUkegv1sHVMyY6nxPNjBl2KZP2Z2jApJ
+mH8Jt8uE8Y6qa2ewdvehch7DoecuZBrY0qt9wWNjxJpBT6RJkHvNMbpbGwhK94zP
+2bzWjzagWEJbJlpltJchZba3rPe2I5rrSeOgri6D585Dvz/aUEu3Z/bEhpDw5UTy
+4HzTvJAU7K4T5Ku+Yw==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEA5VGWSyzDu6A2t8qx8SuqnFqI0hto7Udak9zTeSDy9SjbtRvz
+lPuruKFFaq1atVJAuzIxzMaV0LLy70EH0LapBEmS+kFzQHwVSo06JaZwGEmWkrD1
+3MW97XvanargF4a8o0O3ZUL73/9SyeF6o7J1eJd9hVRY+YVez2HTbAr9AfObRGmh
+BKky3NdRSNuIT2aTS9eWsiJ7dexr26k5XIMoDQ7agvw+YvmrhAjsyeL8PkLY4U/8
+Rnb4PYtY/7VHkPNvN7Cd+A8GmWZAQGvEeX2VfYgjXUP+8+UrvOVpErtSGzzNNt3q
+17/qan2pmrmHl+lL4epy2f1if1aVT4CiEeqMa81yYiNpbeA6RIDd39bH5IsJOqy+
+vBqdwILqPz0iDxm18DxUmcPcvLEPDiGOWRKAqvDmNZpIzTbbK9BzWRzaKdAxkIZV
+T406I70jFp8QKcmfhN1LUg1YV30MnJjjzvVH3wTbM/a7kUSf2uPM1Eb5Ag4z9Q0s
+7zKjalTBqQDMUREPWMA0gufblfyFAYMhHDrqQdzy/JsCTkjtCngUgS/lwzunpA03
+gHRnq3DFIfVn9tTZzopJyCyeku7BrNGBW01DdkKdfMpKgiavIXRaZrg6KqrsngKP
+QjxijIqZixJSgHBQVd6fw040bAJsHtLQrpvbF+r6zKVKMVngxyOvfHe07acCAwEA
+AQKCAgEAq2Uidwd7OAYUP6RxAPeJKhmEv70a3JYDPhbDvHoC/XfPVvsQQN8lxvZK
+ouP0sydKGF/41rvqh7ToXCh3xehwRTMUdpVqpdzYMQODXVab25tMvlEjQiJNxW1X
+xZZMrWjYHxLixP2eshnsb9VmS2l74WkguQGFCuJDLa9JTSydU9MWzrhAT/KCgIsy
+RslGhUn+d6YoDPY23rmZF/HQXLPuwGvEqDgODP3c0u+0w2Qy8foWjbixrp0HB0Da
+gSyl09IO40nPl/LrFMZW6Y9ipwdVLyEMhY3nxbUYzrOMqCJ3yMHWA9czypO3imZn
+2Kwoi0MkozMPKpz2/IPjTyuudsOetR385rNLSAwSDPJiEu+oiC5+iyMdohfdeSJI
+11ttMGXs0FVR9eKOiMJJVhtMA4YTP29BkEvBHuWQjrpmPh2+FX1+EVr8mQbmDxlk
+lqbYYVyclUmg9CJ3fdwJia6v88gVIcmJLJGYqtjFoMFbf24qMP8+Vz3ck86n5l6x
+pHFKR9S4j8yGue70tFYWWkiUamJOfsDavqyDxpiR5Vwz+W+enpkao7KpZPIGfhM8
+g0ioCEA40Yq4grPGzByCSxOqE5RMsEY9iBY14EHXNESvFb+LPWozpyrxkaXwy7Zj
+pFJn6SoYjhO952BhTDal95bcA45dcE7o2VO3x4gSUa7pF8PhbYkCggEBAPWmPltO
+pp3dyd2yQGxXbVVduCBrgC2Kc5GKvlAKf94kr8Hy1w29vFQLF+jloYUXWgOfVZ78
+1uTzSsMuPHwcsYp6a5cqdcVxFarzECQGoahs7Pg5o0WKBRTCw3TUIs4l3UOAog/F
+Eq8M4qW/0idi9OHkOY8S8Wh8nSGHnvX2/m0vXLOl1ZBFs+T3ChikvRiy52RfPlSj
+FZ0wWnePuf1RtKDev4X/21zq3gQmJkGzT5rPIjt0iHE8CZrY5FXRWmT6/nYqXaRx
++FU8fwYdxp6EpsNXEbuuzak77BIzaNUxaQ5WklrHyp0T4raJgNCRfSEsbnSNiMO1
+qzcRh00CW6cUTj0CggEBAO77MEbXsRZgTKTGVyyKsSyn8TuJw6U0nkVID2+Bn325
+3o1XY/oLkCp/XlIcStrzNEi6CfJ6T5Tk52A6K6Sko3BGh1hiMSQEyqeT77Jnjp2V
+fGiXy8925QShmWushuz8RgyTf99mME7BVa78E4CqrHFBTM8S1poDfL+e7qrT2oTO
+yDbQgVacLJK5dop47XvicPvtXHPsDHsL1xl3oBoKYnei+L9HiO96VckCL6k2/2n+
+HuhRnKIbnw6gmCk13mlrHDE+xKEue4KQlfttxByRYXyzYLUQdgptsNE1KjAP8jKR
+vmRFik+XHjcJW8rcTeHYI72Xsj9Ni1OTtRbooZ9frbMCggEBALgxaG5kojCbXkM6
+5m2WfKfW9zpwAROkzhViuGMiDFEQpy1BnplyHKXUD6CVlrBKGze+IKBvK99Owk9p
++vaBOWsBcm1N7DMZIUsmqIKq6kp1KpbR+SgqXrb7dz3B1ztGTwvPhzVV6zWQ7t2j
+4Luhyscar0IlzRgw4E2E+N+rzYknS1Z1UZa1fTeBOXPTCutbMoAjnoQuKQXNfPhL
+sAVPX6aEf43FlMGuZr0rseHvxw/oot17+tcVUopmeYgTy7kOLwGBsOeegbu/+esn
+Jog0JSsxvV1CrHlqMhBPthqtyv1yUpl5V+bCQ2vl3M4HWZEnVMdWXSeKsBBBt3c5
+/mlgo20CggEAN+EMHMR6Z90EjtLBt2yGmQZ75YwE7zAqgdCjq1MH1AIOA8aXXfQu
+NYGqsFBlQGT1qLu3b7chvJL5dwqjAdBtVudm5kevbdig+iYhOfn04uv9NwenZE9M
+pZSTpxWkRu2GA5LJp5bhXMjgHBZdCQzfXWwDZbfl7gU6uiOTcuYuhjdDoW2HkJTb
+50LucNptzTXZbU7G+2QKVwshtJbAHNaox4iX2UIExto4DNQyCMMPXd1JpNDQaydC
+Pc1XRuuNoeClqu4eVAHmZ1UffLbh0Dw+K0ZWKoA62Z1kZsajorFX1HM3rYKFIK8Z
+JY1OpVR18YT0dnRt/VdIaLZ09XJXuEhK+QKCAQBUc/smXF3q4srCfhWu8DayQcoN
+pg1fD+bI3b56jNbQh1jcWnYxLE9hzlSSAI/fdHDGoLaPx/A8iCzZD9xFrK4sJ7Hl
+/j7em54PZ3a/gbGrwO00scWZa2mahfw/PzPkbMQIJFMux8CT/xaTPx91Sn66zwDr
+cF2nHV1GMY4rdbRwVuaT3lgvDJSqmuxgFuqIOHcl7gq23OZMFcSlrl4793yemhxR
+6yyCwyqnf6373rF6KyiR1tk94s/4LfWDiOEvmi9KW5PVoVtoy9dgk6itBLFvWPQR
+gsUP+CrZPsekUbcfJAvovgwGnJz0Sa+WfwFSMlgazQkFAwKrrr978w6IdGQj
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/tlsgo/tlsgo.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/tlsgo/tlsgo.go
index c26b7e2dc4f..bf64e9ab5c2 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/tlsgo/tlsgo.go
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/db/tlsgo/tlsgo.go
@@ -40,7 +40,7 @@ func (c *TLSDBConnector) Configure(opts options.ToolOptions) error {
c.config = NewTLSConfig()
- if opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost {
+ if opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost || opts.TLSInsecure {
c.config.SetInsecure(true)
}
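[editor's note] The one-line change above means any of the three flags now puts the connector into insecure mode. A minimal sketch of the same decision, standalone and outside the vendored code (the struct and field names here only mirror the ToolOptions fields shown in the diff; they are illustrative, not the actual API):

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    type sslOpts struct {
        SSLAllowInvalidCert bool // legacy --sslAllowInvalidCertificates
        SSLAllowInvalidHost bool // legacy --sslAllowInvalidHostnames
        TLSInsecure         bool // new --tlsInsecure
    }

    func tlsConfigFor(o sslOpts) *tls.Config {
        // Any one of the three flags disables both certificate-chain and
        // hostname verification, matching the updated Configure check.
        insecure := o.SSLAllowInvalidCert || o.SSLAllowInvalidHost || o.TLSInsecure
        return &tls.Config{InsecureSkipVerify: insecure}
    }

    func main() {
        cfg := tlsConfigFor(sslOpts{TLSInsecure: true})
        fmt.Println("InsecureSkipVerify:", cfg.InsecureSkipVerify) // true
    }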
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options.go
index 90052cd3205..1e8ab118a0a 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options.go
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options.go
@@ -10,6 +10,7 @@ package options
import (
"fmt"
+ "io/ioutil"
"os"
"regexp"
"runtime"
@@ -22,6 +23,8 @@ import (
"github.com/mongodb/mongo-tools/common/failpoint"
"github.com/mongodb/mongo-tools/common/log"
"github.com/mongodb/mongo-tools/common/util"
+ "github.com/pkg/errors"
+ "gopkg.in/yaml.v2"
)
// Gitspec that the tool was built with. Needs to be set using -ldflags
@@ -48,6 +51,8 @@ var (
const IncompatibleArgsErrorFormat = "illegal argument combination: cannot specify %s and --uri"
const ConflictingArgsErrorFormat = "illegal argument combination: %s conflicts with --uri"
+const deprecationWarningSSLAllow = "WARNING: --sslAllowInvalidCertificates and --sslAllowInvalidHostnames are deprecated, please use --tlsInsecure instead"
+
// Struct encompassing all of the options that are reused across tools: "help",
// "version", verbosity settings, ssl settings, etc.
type ToolOptions struct {
@@ -93,8 +98,9 @@ type Namespace struct {
// Struct holding generic options
type General struct {
- Help bool `long:"help" description:"print usage"`
- Version bool `long:"version" description:"print the tool version and exit"`
+ Help bool `long:"help" description:"print usage"`
+ Version bool `long:"version" description:"print the tool version and exit"`
+ ConfigPath string `long:"config" value-name:"<filename>" description:"path to a configuration file"`
MaxProcs int `long:"numThreads" hidden:"true"`
Failpoints string `long:"failpoints" hidden:"true"`
@@ -139,9 +145,10 @@ type SSL struct {
SSLPEMKeyFile string `long:"sslPEMKeyFile" value-name:"<filename>" description:"the .pem file containing the certificate and key"`
SSLPEMKeyPassword string `long:"sslPEMKeyPassword" value-name:"<password>" description:"the password to decrypt the sslPEMKeyFile, if necessary"`
SSLCRLFile string `long:"sslCRLFile" value-name:"<filename>" description:"the .pem file containing the certificate revocation list"`
- SSLAllowInvalidCert bool `long:"sslAllowInvalidCertificates" description:"bypass the validation for server certificates"`
- SSLAllowInvalidHost bool `long:"sslAllowInvalidHostnames" description:"bypass the validation for server name"`
+ SSLAllowInvalidCert bool `long:"sslAllowInvalidCertificates" hidden:"true" description:"bypass the validation for server certificates"`
+ SSLAllowInvalidHost bool `long:"sslAllowInvalidHostnames" hidden:"true" description:"bypass the validation for server name"`
SSLFipsMode bool `long:"sslFIPSMode" description:"use FIPS mode of the installed openssl library"`
+ TLSInsecure bool `long:"tlsInsecure" description:"bypass the validation for server's certificate chain and host name"`
}
// Struct holding auth-related options
@@ -418,14 +425,25 @@ func (o *ToolOptions) AddOptions(opts ExtraOptions) {
}
}
-// Parse the command line args. Returns any extra args not accounted for by
-// parsing, as well as an error if the parsing returns an error.
+// ParseArgs parses a potential config file followed by the command line args, overriding
+// any values in the config file. Returns any extra args not accounted for by parsing,
+// as well as an error if the parsing returns an error.
func (o *ToolOptions) ParseArgs(args []string) ([]string, error) {
+ LogSensitiveOptionWarnings(args)
+
+ if err := o.ParseConfigFile(args); err != nil {
+ return []string{}, err
+ }
+
args, err := o.parser.ParseArgs(args)
if err != nil {
return []string{}, err
}
+ if o.SSLAllowInvalidCert || o.SSLAllowInvalidHost {
+ log.Logvf(log.Always, deprecationWarningSSLAllow)
+ }
+
// connect directly, unless a replica set name is explicitly specified
if o.Host != "" {
_, o.ReplicaSetName = util.ParseConnectionString(o.Host)
@@ -448,6 +466,91 @@ func (o *ToolOptions) ParseArgs(args []string) ([]string, error) {
return args, err
}
+// LogSensitiveOptionWarnings logs a warning for any sensitive information (i.e. passwords)
+// that appear on the command line for the --password, --uri and --sslPEMKeyPassword options.
+// This also applies to a connection string that appears as a positional argument.
+func LogSensitiveOptionWarnings(args []string) {
+ passwordMsg := "WARNING: On some systems, a password provided directly using " +
+ "--password may be visible to system status programs such as `ps` that may be " +
+ "invoked by other users. Consider omitting the password to provide it via stdin, " +
+ "or using the --config option to specify a configuration file with the password."
+
+ uriMsg := "WARNING: On some systems, a password provided directly in a connection string " +
+ "or using --uri may be visible to system status programs such as `ps` that may be " +
+ "invoked by other users. Consider omitting the password to provide it via stdin, " +
+ "or using the --config option to specify a configuration file with the password."
+
+ sslMsg := "WARNING: On some systems, a password provided directly using --sslPEMKeyPassword " +
+ "may be visible to system status programs such as `ps` that may be invoked by other users. " +
+ "Consider using the --config option to specify a configuration file with the password."
+
+ // Create temporary options for parsing command line args.
+ tempOpts := New("", "", EnabledOptions{Auth: true, Connection: true, URI: true})
+ _, err := tempOpts.parser.ParseArgs(args)
+ if err != nil {
+ return
+ }
+
+ // Log a message for --password, if specified.
+ if tempOpts.Auth.Password != "" {
+ log.Logvf(log.Always, passwordMsg)
+ }
+
+ // Log a message for --uri or a positional connection string, if either is specified.
+ uri := tempOpts.URI.ConnectionString
+ if uri != "" {
+ if cs, err := connstring.ParseURIConnectionString(uri); err == nil && cs.Password != "" {
+ log.Logvf(log.Always, uriMsg)
+ }
+ }
+
+ // Log a message for --sslPEMKeyPassword, if specified.
+ if tempOpts.SSL.SSLPEMKeyPassword != "" {
+ log.Logvf(log.Always, sslMsg)
+ }
+}
+
+// ParseConfigFile iterates over args to find a --config option. If not found, we return.
+// If found, we read the contents of the specified config file in YAML format. We parse
+// any values corresponding to --password, --uri and --sslPEMKeyPassword, and store them
+// in the opts.
+func (opts *ToolOptions) ParseConfigFile(args []string) error {
+ // Get config file path from the arguments, if specified.
+ _, err := opts.parser.ParseArgs(args)
+ if err != nil {
+ return err
+ }
+
+ // No --config option was specified.
+ if opts.General.ConfigPath == "" {
+ return nil
+ }
+
+ // --config option specifies a file path.
+ configBytes, err := ioutil.ReadFile(opts.General.ConfigPath)
+ if err != nil {
+ return errors.Wrapf(err, "error opening file with --config")
+ }
+
+ // Unmarshal the config file as a top-level YAML file.
+ var config struct {
+ Password string `yaml:"password"`
+ ConnectionString string `yaml:"uri"`
+ SSLPEMKeyPassword string `yaml:"sslPEMKeyPassword"`
+ }
+ err = yaml.UnmarshalStrict(configBytes, &config)
+ if err != nil {
+ return errors.Wrapf(err, "error parsing config file %s", opts.General.ConfigPath)
+ }
+
+ // Assign each parsed value to its respective ToolOptions field.
+ opts.Auth.Password = config.Password
+ opts.URI.ConnectionString = config.ConnectionString
+ opts.SSL.SSLPEMKeyPassword = config.SSLPEMKeyPassword
+
+ return nil
+}
+
func (opts *ToolOptions) handleUnknownOption(option string, arg flags.SplitArgument, args []string) ([]string, error) {
if option == "dbpath" || option == "directoryperdb" || option == "journal" {
return args, fmt.Errorf("--dbpath and related flags are not supported in 3.0 tools.\n" +
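[editor's note] The new parse order above is: log warnings for secrets on the command line, read the --config YAML file (only password, uri, and sslPEMKeyPassword keys are accepted), then parse the flags so that command-line values override the file. A hedged usage sketch under the assumption that the vendored import path and the New/ParseArgs signatures match those used in the tests below; the file name is illustrative:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/mongodb/mongo-tools/common/options"
    )

    func main() {
        // The password comes from the config file unless --password is also given.
        yaml := []byte("password: fromConfig\n")
        if err := ioutil.WriteFile("tool-config.yaml", yaml, 0600); err != nil {
            log.Fatal(err)
        }

        opts := options.New("example", "", options.EnabledOptions{Auth: true})
        // The command-line --password overrides the config-file value.
        _, err := opts.ParseArgs([]string{"--config", "tool-config.yaml", "--password", "fromFlag"})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(opts.Auth.Password) // "fromFlag"
    }

Note that yaml.UnmarshalStrict makes unknown or duplicated keys a hard error, so a misspelled key such as "pasword" fails the whole invocation rather than being silently ignored.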
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options_test.go
index fbf603c8947..54aeaf3cdea 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options_test.go
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/options/options_test.go
@@ -7,7 +7,13 @@
package options
import (
+ "bytes"
+ "io/ioutil"
+ "os"
+
"github.com/mongodb/mongo-tools/common/connstring"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/testtype"
. "github.com/smartystreets/goconvey/convey"
"runtime"
@@ -15,6 +21,11 @@ import (
"time"
)
+const (
+ ShouldSucceed = iota
+ ShouldFail
+)
+
func TestVerbosityFlag(t *testing.T) {
Convey("With a new ToolOptions", t, func() {
enabled := EnabledOptions{false, false, false, false}
@@ -422,3 +433,200 @@ func TestHiddenOptionsDefaults(t *testing.T) {
})
}
+
+func TestDeprecationWarning(t *testing.T) {
+ if !testtype.HasTestType(testtype.SSLTestType) {
+ t.SkipNow()
+ }
+
+ Convey("deprecate message", t, func() {
+ var buffer bytes.Buffer
+
+ log.SetWriter(&buffer)
+ defer log.SetWriter(os.Stderr)
+
+ Convey("Warning for sslAllowInvalidHostnames", func() {
+ enabled := EnabledOptions{Connection: true}
+ opts := New("test", "", enabled)
+ args := []string{"--sslAllowInvalidHostnames", "mongodb://user:pass@foo/"}
+ _, err := opts.ParseArgs(args)
+ So(err, ShouldBeNil)
+ result := buffer.String()
+ So(result, ShouldContainSubstring, deprecationWarningSSLAllow)
+ })
+
+ Convey("Warning for sslAllowInvalidCertificates", func() {
+ enabled := EnabledOptions{Connection: true}
+ opts := New("test", "", enabled)
+ args := []string{"--ssl", "--sslAllowInvalidCertificates", "mongodb://user:pass@foo/"}
+ _, err := opts.ParseArgs(args)
+ So(err, ShouldBeNil)
+ result := buffer.String()
+ So(result, ShouldContainSubstring, deprecationWarningSSLAllow)
+ })
+
+ Convey("No Warning for tlsInsecure", func() {
+ enabled := EnabledOptions{Connection: true}
+ opts := New("test", "", enabled)
+ args := []string{"--ssl", "--tlsInsecure", "mongodb://user:pass@foo/"}
+ _, err := opts.ParseArgs(args)
+ So(err, ShouldBeNil)
+ result := buffer.String()
+ So(result, ShouldNotContainSubstring, deprecationWarningSSLAllow)
+ })
+ })
+}
+
+type configTester struct {
+ description string
+ yamlBytes []byte
+ expectedOpts *ToolOptions
+ outcome int
+}
+
+func runConfigFileTestCases(testCases []configTester) {
+ configFilePath := "./test-config.yaml"
+ args := []string{"--config", configFilePath}
+ defer os.Remove(configFilePath)
+
+ for _, testCase := range testCases {
+ if err := ioutil.WriteFile(configFilePath, testCase.yamlBytes, 0644); err != nil {
+ So(err, ShouldBeNil)
+ }
+ opts := New("test", "", EnabledOptions{true, true, true, true})
+ err := opts.ParseConfigFile(args)
+
+ var assertion func()
+ if testCase.outcome == ShouldSucceed {
+ assertion = func() {
+ So(err, ShouldBeNil)
+ So(opts.Auth.Password, ShouldEqual, testCase.expectedOpts.Auth.Password)
+ So(opts.URI.ConnectionString, ShouldEqual, testCase.expectedOpts.URI.ConnectionString)
+ So(opts.SSL.SSLPEMKeyPassword, ShouldEqual, testCase.expectedOpts.SSL.SSLPEMKeyPassword)
+ }
+ } else {
+ assertion = func() {
+ So(err, ShouldNotBeNil)
+ }
+ }
+
+ Convey(testCase.description, assertion)
+ }
+}
+
+func createExpectedOpts(pw string, uri string, ssl string) *ToolOptions {
+ opts := New("test", "", EnabledOptions{true, true, true, true})
+ opts.Auth.Password = pw
+ opts.URI.ConnectionString = uri
+ opts.SSL.SSLPEMKeyPassword = ssl
+ return opts
+}
+
+func TestParseConfigFile(t *testing.T) {
+ if !testtype.HasTestType(testtype.UnitTestType) {
+ t.SkipNow()
+ }
+
+ Convey("should error with no config file specified", t, func() {
+ opts := New("test", "", EnabledOptions{})
+
+ // --config at beginning of args list
+ args := []string{"--config", "--database", "myDB"}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+
+ // --config at end of args list
+ args = []string{"--database", "myDB", "--config"}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+
+ // --config= at beginning of args list
+ args = []string{"--config=", "--database", "myDB"}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+
+ // --config= at end of args list
+ args = []string{"--database", "myDB", "--config="}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+ })
+
+ Convey("should error with non-existent config file specified", t, func() {
+ opts := New("test", "", EnabledOptions{})
+
+ // --config with non-existent file
+ args := []string{"--config", "DoesNotExist.yaml", "--database", "myDB"}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+
+ // --config= with non-existent file
+ args = []string{"--config=DoesNotExist.yaml", "--database", "myDB"}
+ So(opts.ParseConfigFile(args), ShouldNotBeNil)
+ })
+
+ Convey("with an existing config file specified", t, func() {
+ runConfigFileTestCases([]configTester{
+ {
+ "containing nothing (empty file)",
+ []byte(""),
+ createExpectedOpts("", "", ""),
+ ShouldSucceed,
+ },
+ {
+ "containing only password field",
+ []byte("password: abc123"),
+ createExpectedOpts("abc123", "", ""),
+ ShouldSucceed,
+ },
+ {
+ "containing only uri field",
+ []byte("uri: abc123"),
+ createExpectedOpts("", "abc123", ""),
+ ShouldSucceed,
+ },
+ {
+ "containing only sslPEMKeyPassword field",
+ []byte("sslPEMKeyPassword: abc123"),
+ createExpectedOpts("", "", "abc123"),
+ ShouldSucceed,
+ },
+ {
+ "containing all of password, uri and sslPEMKeyPassword fields",
+ []byte("password: abc123\nuri: def456\nsslPEMKeyPassword: ghi789"),
+ createExpectedOpts("abc123", "def456", "ghi789"),
+ ShouldSucceed,
+ },
+ {
+ "containing a duplicate field",
+ []byte("password: abc123\npassword: def456"),
+ nil,
+ ShouldFail,
+ },
+ {
+ "containing an unsupported or misspelled field",
+ []byte("pasword: abc123"),
+ nil,
+ ShouldFail,
+ },
+ })
+ })
+
+ Convey("with command line args that override config file values", t, func() {
+ configFilePath := "./test-config.yaml"
+ defer os.Remove(configFilePath)
+ if err := ioutil.WriteFile(configFilePath, []byte("password: abc123"), 0644); err != nil {
+ So(err, ShouldBeNil)
+ }
+
+ Convey("with --config followed by --password", func() {
+ args := []string{"--config=" + configFilePath, "--password=def456"}
+ opts := New("test", "", EnabledOptions{Auth: true})
+ _, err := opts.ParseArgs(args)
+ So(err, ShouldBeNil)
+ So(opts.Auth.Password, ShouldEqual, "def456")
+ })
+
+ Convey("with --password followed by --config", func() {
+ args := []string{"--password=ghi789", "--config=" + configFilePath}
+ opts := New("test", "", EnabledOptions{Auth: true})
+ _, err := opts.ParseArgs(args)
+ So(err, ShouldBeNil)
+ So(opts.Auth.Password, ShouldEqual, "ghi789")
+ })
+ })
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/testutil/ssl_integration.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/testutil/ssl_integration.go
index 8fc9293bd17..008066f05ee 100644
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/testutil/ssl_integration.go
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/common/testutil/ssl_integration.go
@@ -15,8 +15,8 @@ func GetSSLOptions() commonOpts.SSL {
if testtype.HasTestType(testtype.SSLTestType) {
return commonOpts.SSL{
UseSSL: true,
- SSLCAFile: "../common/db/openssl/testdata/ca.pem",
- SSLPEMKeyFile: "../common/db/openssl/testdata/server.pem",
+ SSLCAFile: "../common/db/openssl/testdata/ca-ia.pem",
+ SSLPEMKeyFile: "../common/db/openssl/testdata/test-client.pem",
}
}
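[editor's note] An illustrative sketch (not in the repo) of how an integration test consumes the helper above; it assumes the ssl test type is enabled via the test environment:

    package example

    import (
        "fmt"

        "github.com/mongodb/mongo-tools/common/testutil"
    )

    func printSSLPaths() {
        sslOpts := testutil.GetSSLOptions()
        if sslOpts.UseSSL {
            // Paths are relative to each tool's package directory, hence the "../" prefix.
            fmt.Println("CA:", sslOpts.SSLCAFile)
            fmt.Println("client PEM:", sslOpts.SSLPEMKeyFile)
        }
    }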
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/set_goenv.sh b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/set_goenv.sh
index 5f7b32bb7bf..c2c8ed99183 100755
--- a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/set_goenv.sh
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/set_goenv.sh
@@ -111,8 +111,7 @@ buildflags() {
UNAME_S=$(PATH="/usr/bin:/bin" uname -s)
case $UNAME_S in
Linux)
- # SPLUNK - gcc go does not have support for buildmode
- #flags="-buildmode=pie"
+ flags="-buildmode=pie"
;;
esac
echo "$flags"
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/buildlogger.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..0dfb5ab5f8c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/buildlogger.py
@@ -0,0 +1,492 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly).
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a bare filename (no directory component), the
+directories one, two, and three levels above this script's location are
+searched, in that order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we load the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from settings.py
+# which will be one, two, or three directories up
+# from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+    match = re.match(r'(?P<mimetype>[^;]+)(?:.*charset=(?P<charset>[^ ]+))?$', content_type or '')
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
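+
+# post() usage sketch (payload shape as in get_or_create_build below):
+#   post('build', {'builder': 'Nightly Linux 64-bit', 'buildnum': 42})
+# returns the decoded JSON response body (or raw bytes for non-JSON
+# responses), and None if the HTTP request failed.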
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+    and returns None.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
+
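+# LogAppender usage sketch (mirrors wrap_test/wrap_global below; ids are placeholders):
+#
+#   appender = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+#   appender(line)       # buffers; flushed every 2000 lines or 10 seconds
+#   appender.submit()    # force a flush; failed batches are kept and retried
+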
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+    while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/cleanbb.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+    if root and re.compile(r"(\W|^)mongod(\.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
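+# shouldKill() examples (hypothetical command lines):
+#   shouldKill("mongod --dbpath /data/db/sconsTests --port 27017",
+#              root="/data/db/sconsTests")    # True: a mongod using our dbpath
+#   shouldKill("python smoke.py js")          # False: the test runner itself
+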
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+            print( utils.execsys( "/sbin/ifconfig -a" ) )
+        except Exception, e:
+            print( "can't get interfaces: " + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/pipe.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/pipe.py
new file mode 100644
index 00000000000..bb080721b2d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/resmokelib/core/pipe.py
@@ -0,0 +1,87 @@
+"""
+Helper class to read output of a subprocess. Used to avoid deadlocks
+from the pipe buffer filling up and blocking the subprocess while it's
+being waited on.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class LoggerPipe(threading.Thread):
+ """
+ Asynchronously reads the output of a subprocess and sends it to a
+ logger.
+ """
+
+ # The start() and join() methods are not intended to be called directly on the LoggerPipe
+    # instance. Since we override them to enforce that, the superclass's versions are preserved here.
+ __start = threading.Thread.start
+ __join = threading.Thread.join
+
+ def __init__(self, logger, level, pipe_out):
+ """
+ Initializes the LoggerPipe with the specified logger, logging
+ level to use, and pipe to read from.
+ """
+
+ threading.Thread.__init__(self)
+ # Main thread should not call join() when exiting
+ self.daemon = True
+
+ self.__logger = logger
+ self.__level = level
+ self.__pipe_out = pipe_out
+
+ self.__lock = threading.Lock()
+ self.__condition = threading.Condition(self.__lock)
+
+ self.__started = False
+ self.__finished = False
+
+ LoggerPipe.__start(self)
+
+ def start(self):
+ raise NotImplementedError("start should not be called directly")
+
+ def run(self):
+ """
+ Reads the output from 'pipe_out' and logs each line to 'logger'.
+ """
+
+ with self.__lock:
+ self.__started = True
+ self.__condition.notify_all()
+
+ # Close the pipe when finished reading all of the output.
+ with self.__pipe_out:
+ # Avoid buffering the output from the pipe.
+ for line in iter(self.__pipe_out.readline, b""):
+ # Convert the output of the process from a bytestring to a UTF-8 string, and replace
+ # any characters that cannot be decoded with the official Unicode replacement
+ # character, U+FFFD. The log messages of MongoDB processes are not always valid
+ # UTF-8 sequences. See SERVER-7506.
+ line = line.decode("utf-8", "replace")
+ self.__logger.log(self.__level, line.rstrip())
+
+ with self.__lock:
+ self.__finished = True
+ self.__condition.notify_all()
+
+ def join(self, timeout=None):
+ raise NotImplementedError("join should not be called directly")
+
+ def wait_until_started(self):
+ with self.__lock:
+ while not self.__started:
+ self.__condition.wait()
+
+ def wait_until_finished(self):
+ with self.__lock:
+ while not self.__finished:
+ self.__condition.wait()
+
+ # No need to pass a timeout to join() because the thread should already be done after
+ # notifying us it has finished reading output from the pipe.
+ LoggerPipe.__join(self) # Tidy up the started thread.
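+
+
+# Usage sketch (mirrors how smoke.py drives this class; assumes
+# `import logging, subprocess`, and the command shown is a placeholder):
+#
+#   proc = subprocess.Popen(["mongod"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+#   lp = LoggerPipe(logging.getLogger("mongod"), logging.INFO, proc.stdout)
+#   lp.wait_until_started()
+#   proc.wait()
+#   lp.wait_until_finished()   # joins the reader thread; run() closes the pipe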
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/smoke.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/smoke.py
new file mode 100755
index 00000000000..8f45bf21e9a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/smoke.py
@@ -0,0 +1,1398 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+import logging
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
+
+from buildscripts.resmokelib.core import pipe
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API, for a sneaky
+# purpose below.
+class Nothing(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, type, value, traceback):
+ return not isinstance(value, Exception)
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
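+# e.g. with MONGO_USE_BUILDLOGGER=true (argv is a placeholder):
+#   buildlogger(["mongod", "--port", "27017"], is_global=True)
+#   => [<python>, "buildscripts/buildlogger.py", "-g", "mongod", "--port", "27017"]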
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(object):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ self.job_object = None
+ self._inner_proc_pid = None
+ self._stdout_pipe = None
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.stop()
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ return not isinstance(value, Exception)
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+        # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
+ if authMechanism != 'MONGODB-CR':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+            argv += ['--clusterAuthMode', 'x509']
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ # If the mongod process is spawned under buildlogger.py, then the first line of output
+ # should include the pid of the underlying mongod process. If smoke.py didn't create its own
+ # job object because it is already inside one, then the pid is used to attempt to terminate
+ # the underlying mongod process.
+ first_line = self.proc.stdout.readline()
+        match = re.search(r"^\[buildlogger\.py\] pid: (?P<pid>[0-9]+)$", first_line.rstrip())
+ if match is not None:
+ self._inner_proc_pid = int(match.group("pid"))
+ else:
+ # The first line of output didn't include the pid of the underlying mongod process. We
+ # write the first line of output to smoke.py's stdout to ensure the message doesn't get
+ # lost since it's possible that buildlogger.py isn't being used.
+ sys.stdout.write(first_line)
+
+ logger = logging.Logger("", level=logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(fmt="%(message)s"))
+ logger.addHandler(handler)
+
+ self._stdout_pipe = pipe.LoggerPipe(logger, logging.INFO, self.proc.stdout)
+ self._stdout_pipe.wait_until_started()
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On Windows, this
+ method also assigns the started process to a job object if a new
+ one was created. This ensures that any child processes of this
+ process can be killed with a single call to TerminateJobObject
+ (see self.stop()).
+ """
+
+ creation_flags = 0
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+ import win32process
+
+ # Don't create a job object if the current process is already inside one.
+ if not win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ proc = Popen(argv, creationflags=creation_flags, stdout=PIPE, stderr=None, bufsize=0)
+
+ if self.job_object is not None:
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32" and self.job_object is not None:
+ # If smoke.py created its own job object, then we clean up the spawned processes by
+ # terminating it.
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ import time
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif os.sys.platform == "win32":
+ # If smoke.py didn't create its own job object, then we attempt to clean up the
+ # spawned processes by terminating them individually.
+ import win32api
+ import win32con
+ import win32event
+ import win32process
+ import winerror
+
+ def win32_terminate(handle):
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python
+ # 2.7 because earlier versions do not catch exceptions.
+ try:
+ win32process.TerminateProcess(handle, -1)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has
+ # already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ # Terminate the mongod process underlying buildlogger.py if one exists.
+ if self._inner_proc_pid is not None:
+ # The PROCESS_TERMINATE privilege is necessary to call TerminateProcess() and
+ # the SYNCHRONIZE privilege is necessary to call WaitForSingleObject(). See
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx
+ # for more details.
+ required_access = win32con.PROCESS_TERMINATE | win32con.SYNCHRONIZE
+ inner_proc_handle = win32api.OpenProcess(required_access,
+ False,
+ self._inner_proc_pid)
+ try:
+ win32_terminate(inner_proc_handle)
+ win32event.WaitForSingleObject(inner_proc_handle, win32event.INFINITE)
+ finally:
+ win32api.CloseHandle(inner_proc_handle)
+
+ win32_terminate(self.proc._handle)
+ else:
+ # This function not available in Python 2.5
+ self.proc.terminate()
+ except AttributeError:
+ from os import kill
+ kill(self.proc.pid, 15)
+ self.proc.wait()
+
+ if self._stdout_pipe is not None:
+ self._stdout_pipe.wait_until_finished()
+
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+        raise Bug("slave instance doesn't have slave attribute set")
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+                stats["error-docs"] = e
+
+            screwy_in_slave[coll] = stats
+            if mhash == "no _id _index":
+                mOplog = mTestDB.connection.local["oplog.$main"]
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+            return True
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets"]
+# look for jstests and one of the above suites separated by either posix or windows slashes
+forceCommandsRE = re.compile(r"jstests[/\\](%s)" % ('|'.join(forceCommandsForDirs)))
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if swm == "legacy": # change when the default changes to "commands"
+ if use_write_commands or forceCommandsRE.search(path):
+ swm = "commands"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+    result["mongod_running_at_start"] = mongod_is_up
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
+ argv = [path]
+ # default data directory for test and perftest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+    if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and '--eval' not in argv:
+ evalString = 'load("jstests/libs/use_extended_timeout.js");' + \
+ 'TestData = new Object();' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if os.sys.platform == "win32":
+            # double the backslashes in the evalString on windows; this
+            # prevents them from being removed when the shell
+            # evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ argv = argv + [ '--eval', evalString]
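+        # The assembled shell invocation now looks roughly like (illustrative,
+        # abbreviated): mongo --port 27017 <test>.js --eval 'load(...);
+        # TestData = new Object(); TestData.testName = "..."; ...'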
+
+ if argv[0].endswith( 'test' ) or argv[0].endswith( 'test.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+            break
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if not is_mongod_still_up:
+ print "mongod is not running after test"
+        result["mongod_running_at_end"] = is_mongod_still_up
+        if start_mongod:
+            raise TestServerFailure(path)
+
+    result["mongod_running_at_end"] = is_mongod_still_up
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+ else:
+ master = Nothing()
+ try:
+ if small_oplog:
+ slave = mongod(slave=True,
+ set_parameters=set_parameters).__enter__()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+            primary = MongoClient(port=master.port)
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+                result = primary.admin.command("ismaster")
+ ismaster = result["ismaster"]
+ time.sleep(1)
+ else:
+ slave = Nothing()
+
+ try:
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.__exit__(None, None, None)
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ finally:
+ slave.__exit__(None, None, None)
+ finally:
+ master.__exit__(None, None, None)
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+        print """The following collections have different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+            print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (
+                coll, stats['hashes']['master'], stats['hashes']['slave'],
+                stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+                    print "Different Docs"
+                    print "Master docs:"
+                    pprint.pprint(stats["docs"]["master"], indent=2)
+                    print "Slave docs:"
+                    pprint.pprint(stats["docs"]["slave"], indent=2)
+                else:
+                    print "All docs matched!"
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "gle": ("gle/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
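+# e.g. expand_suites(["js"]) (defined below) globs jstests/core/*.js and yields
+# (path, usedb=True) pairs, while expand_suites(["repl"]) yields usedb=False
+# pairs because those suites manage their own mongods.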
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['test',
+ 'perf',
+ 'jsCore',
+ 'jsPerf',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'test.exe'
+ else:
+ program = 'test'
+ (globstr, usedb) = (program, False)
+ elif suite == 'perf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
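+
+# For instance, expand_suites(['jsCore']) globs jstests/core/*.js under
+# mongo_repo and returns [(path, True), ...]; with expandUseDB=False the same
+# call returns [('jsCore', False)], leaving the glob unexpanded for the caller.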
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
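+
+# e.g. add_exe("mongod") yields "mongod.exe" on Windows and "mongod" elsewhere.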
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful: this can be called multiple times.
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
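+# Hash of this script's own source. run_old_fails compares it against the
+# version recorded in the failfile so that stale entries are discarded.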
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('test', 'test.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+    if options.ignore_files is not None:
+        ignore_patt = re.compile( options.ignore_files )
+        print "Ignoring files with pattern: ", options.ignore_files
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/utils.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/utils.py
new file mode 100644
index 00000000000..68273ee69c8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/buildscripts/utils.py
@@ -0,0 +1,230 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
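+
+# e.g. a .git/HEAD of "ref: refs/heads/v2.6" yields "v2.6"; a detached HEAD
+# containing a bare commit hash is returned unchanged.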
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+    if b is None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+    if isinstance( args , str ):
+        r = re.compile( "\s+" )
+        args = r.split( args )
+    p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+    return p.communicate()
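+
+# e.g. execsys( "ls -l /tmp" ) splits the string on whitespace, runs it, and
+# returns the (stdout, stderr) pair from Popen.communicate().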
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+    raise Exception( "can't find a version of [" + root + "] choices: " + str( choices ) )
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+        print( "Creating dir: " + d )
+ os.makedirs( d )
+ if not os.path.exists( d ):
+            raise Exception( "Failed to create dir: " + d )
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
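+
+# e.g. which("python") returns the absolute path of the first "python" found
+# on PATH (Path on Windows), falling back to the bare name if none exists.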
+
+def md5sum( fname ):
+    # TODO: error handling, etc.
+    return execsys( "md5sum " + fname )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
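+
+# e.g. unicode_dammit('caf\xe9'): the lone 0xE9 byte is not valid UTF-8, so the
+# 'repr' handler above substitutes its escaped form, yielding u'caf\\xe9'.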
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/authTestsKey b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/ca.pem
new file mode 100644
index 00000000000..f739ef0627b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/ca.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w
+DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0
+IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz
+MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI
+DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH
+ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx
+GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27
+nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz
+hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN
+BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM
+hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB
+2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E
+qQ==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client.pem
new file mode 100644
index 00000000000..85ace4fd40b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 7 (0x7)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 23 14:55:32 2013 GMT
+ Not After : Jan 7 14:55:32 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a:
+ 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1:
+ 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51:
+ 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f:
+ 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79:
+ 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c:
+ 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae:
+ 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd:
+ c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53:
+ a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31:
+ 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35:
+ 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98:
+ be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18:
+ b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe:
+ a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15:
+ 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2:
+ 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4:
+ 6e:a7
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d:
+ f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5:
+ f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db:
+ af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87:
+ 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35:
+ 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa:
+ 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85:
+ 24:18
+-----BEGIN CERTIFICATE-----
+MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0
+NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET
+MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b
+qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM
+zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V
+rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad
+STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B
+MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ
+BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0
+aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw
+FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54
+xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb
+r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh
+9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee
+p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y
+LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j
+mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW
+WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9
+jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+
+flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4
+H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m
+2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4
+tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU
+w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S
+eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/
+vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC
+yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn
+LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s
+9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo
+czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS
+q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop
+59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4
+9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9
+SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn
+X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU
+0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52
+re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT
+F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3
+57rGT6p0OuM8qbrTzpv3JMrm
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..276e62644b6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/client_revoked.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBDDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MjUzMVoXDTQxMDQyMjE1MjUzMVowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZjbGllbnQwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBALX6DqSWRJBEJJRIRqG5X3cFHzse5jGIdV8fTqikaVitvuhs
+15z1njzfqBQZMJBCEvNb4eaenXJRMBDkEOcbfy6ah+ZLLqGFy7b6OxTROfx++3fT
+gsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN/ufbH2sX451nVd+j6oAz0dTz7RvhAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjciYidtPfd5ILsm7c2yYGV99vwjAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCgs74YrlZ6nivONRO8tNWi+gJ1TcWbQV+5yfF7Ispxo1TFxpa6GTWeZA3X4CwK
+PHmCdhb+oZoi59Qny0KECxtBj6zwdYIKLN0gIFYygaGX5J+YrRVatTjCJUHz9fco
+hZwApLEUkYg2Ldvbg+FncDwiVhi74OW685SkThNIulmPcQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALX6DqSWRJBEJJRI
+RqG5X3cFHzse5jGIdV8fTqikaVitvuhs15z1njzfqBQZMJBCEvNb4eaenXJRMBDk
+EOcbfy6ah+ZLLqGFy7b6OxTROfx++3fTgsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN
+/ufbH2sX451nVd+j6oAz0dTz7RvhAgMBAAECgYEAmHRy+g5uSJLeNmBK1EiSIwtm
+e8hKP+s7scJvyrdbDpEZJG2zQWtA82zIynXECsdgSwOKQQRXkaNU6oG3a3bM19uY
+0CqFRb9EwOLIStp+CM5zLRGmUr73u/+JrBPUWWFJkJvINvTXt18CMnCmosTvygWB
+IBZqsuEXQ6JcejxzQ6UCQQDdVUNdE2JgHp1qrr5l8563dztcrfCxuVFtgsj6qnhd
+UrBAa388B9kn4yVAe2i55xFmtHsO9Bz3ViiDFO163SafAkEA0nq8PeZtcIlZ2c7+
+6/Vdw1uLE5APVG2H9VEZdaVvkwIIXo8WQfMwWo5MQyPjVyBhUGlDwnKa46AcuplJ
+2XMtfwJBAIDrMfKb4Ng13OEP6Yz+yvr4MxZ3plQOqlRMMn53HubUzB6pvpGbzKwE
+DWWyvDxUT/lvtKHwJJMYlz5KyUygVecCQHr50RBNmLW+2muDILiWlOD2lIyqh/pp
+QJ2Zc8mkDkuTTXaKHZQM1byjFXXI+yRFu/Xyeu+abFsAiqiPtXFCdVsCQHai+Ykv
+H3y0mUJmwBVP2fBE3GiTGlaadM0auZKu7/ad+yo7Hv8Kibacwibzrj9PjT3mFSSF
+vujX1oWOaxAMVbE=
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/cluster-cert.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/cluster-cert.pem
new file mode 100644
index 00000000000..74dc9845e3d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/cluster-cert.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 5 (0x5)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 7 17:19:17 2013 GMT
+ Not After : Dec 22 17:19:17 2040 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=clustertest
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:98:ec:01:6e:f4:ae:8e:16:c8:87:a2:44:86:a0:
+ 45:5c:ca:82:56:ba:0d:a9:60:bf:07:40:da:db:70:
+ 33:a6:c2:ec:9d:e1:f0:da:fe:b9:f9:ac:23:33:64:
+ e6:63:71:cc:a2:0d:eb:86:bc:31:32:aa:30:e6:1d:
+ 5d:6d:fd:45:f4:2f:dc:72:93:bc:92:27:f7:6a:5a:
+ 18:04:f7:64:d0:6a:3c:a9:14:f6:9e:9d:58:26:f4:
+ 16:93:7e:3d:2e:3c:9e:54:41:4d:1a:e1:bd:b4:cf:
+ d0:05:4c:4d:15:fb:5c:70:1e:0c:32:6d:d7:67:5b:
+ ec:b2:61:83:e3:f0:b1:78:aa:30:45:86:f9:6d:f5:
+ 48:1f:f1:90:06:25:db:71:ed:af:d7:0d:65:65:70:
+ 89:d4:c8:c8:23:a0:67:22:de:d9:6e:1d:44:38:cf:
+ 0f:eb:2c:fe:79:01:d7:98:15:5f:22:42:3f:ee:c9:
+ 16:eb:b9:25:08:9a:2a:11:74:47:e0:51:75:8c:ae:
+ eb:8d:b5:30:fe:48:98:0a:9e:ba:6e:a4:60:08:81:
+ c6:05:a0:97:38:70:c0:1f:b4:27:96:8e:c3:d2:c1:
+ 14:5f:34:16:91:7d:ad:4c:e9:23:07:f0:42:86:78:
+ 11:a1:1e:9d:f3:d0:41:09:06:7d:5c:89:ef:d2:0d:
+ 6c:d5
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ C9:00:3A:28:CC:6A:75:57:82:81:00:A6:25:48:6C:CE:0A:A0:4A:59
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ d1:55:e3:5c:43:8c:4f:d3:29:8d:74:4a:1d:23:50:17:27:b3:
+ 30:6f:c6:d7:4c:6c:96:7e:52:a0:2f:91:92:b3:f5:4c:a1:ca:
+ 88:62:31:e4:d6:64:ac:40:17:47:00:24:e8:0d:3b:7b:c7:d4:
+ 7f:3a:76:45:27:fd:9b:ae:9d:44:71:8f:ab:62:60:e5:9d:e8:
+ 59:dd:0e:25:17:14:f8:83:b0:b6:fc:5f:27:8b:69:a2:dc:31:
+ b9:17:a1:27:92:96:c1:73:bf:a3:f0:b8:97:b9:e2:fb:97:6d:
+ 44:01:b0:68:68:47:4b:84:56:3b:19:66:f8:0b:6c:1b:f5:44:
+ a9:ae
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAuCgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgwNzE3
+MTkxN1oXDTQwMTIyMjE3MTkxN1owbzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRQwEgYDVQQDDAtjbHVzdGVydGVzdDCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJjsAW70ro4WyIeiRIagRVzKgla6DalgvwdA
+2ttwM6bC7J3h8Nr+ufmsIzNk5mNxzKIN64a8MTKqMOYdXW39RfQv3HKTvJIn92pa
+GAT3ZNBqPKkU9p6dWCb0FpN+PS48nlRBTRrhvbTP0AVMTRX7XHAeDDJt12db7LJh
+g+PwsXiqMEWG+W31SB/xkAYl23Htr9cNZWVwidTIyCOgZyLe2W4dRDjPD+ss/nkB
+15gVXyJCP+7JFuu5JQiaKhF0R+BRdYyu6421MP5ImAqeum6kYAiBxgWglzhwwB+0
+J5aOw9LBFF80FpF9rUzpIwfwQoZ4EaEenfPQQQkGfVyJ79INbNUCAwEAAaN7MHkw
+CQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2Vy
+dGlmaWNhdGUwHQYDVR0OBBYEFMkAOijManVXgoEApiVIbM4KoEpZMB8GA1UdIwQY
+MBaAFAdBGTqffsW3Ik63vNXf5PwJuGQWMA0GCSqGSIb3DQEBBQUAA4GBANFV41xD
+jE/TKY10Sh0jUBcnszBvxtdMbJZ+UqAvkZKz9UyhyohiMeTWZKxAF0cAJOgNO3vH
+1H86dkUn/ZuunURxj6tiYOWd6FndDiUXFPiDsLb8XyeLaaLcMbkXoSeSlsFzv6Pw
+uJe54vuXbUQBsGhoR0uEVjsZZvgLbBv1RKmu
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCY7AFu9K6OFsiH
+okSGoEVcyoJWug2pYL8HQNrbcDOmwuyd4fDa/rn5rCMzZOZjccyiDeuGvDEyqjDm
+HV1t/UX0L9xyk7ySJ/dqWhgE92TQajypFPaenVgm9BaTfj0uPJ5UQU0a4b20z9AF
+TE0V+1xwHgwybddnW+yyYYPj8LF4qjBFhvlt9Ugf8ZAGJdtx7a/XDWVlcInUyMgj
+oGci3tluHUQ4zw/rLP55AdeYFV8iQj/uyRbruSUImioRdEfgUXWMruuNtTD+SJgK
+nrpupGAIgcYFoJc4cMAftCeWjsPSwRRfNBaRfa1M6SMH8EKGeBGhHp3z0EEJBn1c
+ie/SDWzVAgMBAAECggEAfogRK5Dz+gfqByiCEO7+VagOrtolwbeWeNb2AEpXwq1Z
+Ac5Y76uDkI4ZVkYvx6r6ykBAWOzQvH5MFavIieDeiA0uF/QcPMcrFmnTpBBb74No
+C/OXmGjS7vBa2dHDp8VqsIaT2SFeSgUFt8yJoB2rP+3s47E1YYWTVYoQioO3JQJN
+f0mSuvTnvJO9lbTWiW+yWGVkQvIciCCnHkCEwU0fHht8IoFBGNFlpWZcGiMeietr
+16GdRcmAq95q8TTCeQxkgmmL+0ZJ1BrF7llG2pGYdacawXj1eVRqOHQaFIlcKe05
+RITpuXVYOWBpBpfbQsBZaCGLe7WxHJedrFxdbqm0ZQKBgQDLUQrmIl2wz43t3sI+
+WjW6y1GwMPG9EjXUT1Boq6PNHKgw04/32QNn5IMmz4cp2Mgyz7Hc0ABDU/ZATujd
+yCkxVErPbKRDKSxSl6nLXtLpLbHFmVPfKPbNKIuyFMBsOFOtoFoVbo33wI5dI7aO
+i7sTGB3ngbq4pzCJ9dVt/t81QwKBgQDAjAtBXS8WB69l9w35tx+MgYG0LJ+ykAug
+d91pwiWqSt02fZ0nr/S/76G6B4C8eqeOnYh1RzF5isLD246rLD2Y+uuFrgasvSiS
+4qSKbpG2kk02R/DRTAglAyXI0rhYIDrYKCQPWqNMWpawT/FQQwbFjTuhmz10FyXS
+hmVztZWoBwKBgQCBdnptLibghllGxViEoaai6gJ7Ib9ceHMEXPjDnb+wxPWoGZ8L
+4AjWJ+EHXpAfqmVYTX5hL6VrOdSNAHIxftoUCiuUxwYVqesKMH6y/A9q4WjYfRi1
++fyliJLjc2lPv9IwtfGGwh3uS5ObZTlCrWES+IFaP/YozHUQ9BPSdb+lxwKBgB35
+Lv9b3CqXw6why2EmKpkax/AeSjXnyoeOYT9HY8mgodMLtt0ovPbr/McSx+2PQmon
+B8kJ7h+3hB4tHYZz+prH5MYIky1svNYwxeBu2ewL1k0u4cQTC+mHFeivNNczHTXs
++cASIf2O1IpZx3zxEirKk4/StLxPpimhlkVu7P8dAoGBAJVw2U70+PagVBPtvheu
+ZDEvxSEzrn90ivIh7Y6ZIwdSOSLW04sOVL2JAzO155u4g77jdmcxV3urr1vD9LbF
+qkBGLXx7FFC/Mn/H42qerxr16Bt6RtvVpms71UIQLYxA7caab9cqoyt0wkgqJFKX
+fj0TVODnIf+zPMDCu+frpLbA
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..e2ca646b63a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,202 @@
+// Merge the two options objects. Used as a helper when we are trying to actually compare options
+// despite the fact that our test framework adds extra stuff to it. Anything set in the second
+// options object overrides the first options object. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
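+
+// For example (illustration only):
+//     mergeOptions({ a : { b : 1 }, c : 2 }, { a : { b : 3 } })
+// returns { a : { b : 3 }, c : 2 }: nested objects are merged recursively and
+// leaf values from the second object win.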
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl.pem
new file mode 100644
index 00000000000..dce0a0fb3f1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQ3NDFaFw00
+MDA0MjgxODQ3NDFaoA4wDDAKBgNVHRQEAwIBCzANBgkqhkiG9w0BAQUFAAOBgQAu
+PlPDGei2q6kdkoHe8vmDuts7Hm/o9LFbBmn0XUcfHisCJCPsJTyGCsgnfIiBcXJY
+1LMKsQFnYGv28rE2ZPpFg2qNxL+6qUEzCvqaHLX9q1V0F+f8hHDxucNYu52oo/h0
+uNZxB1KPFI2PReG5d3oUYqJ2+EctKkrGtxSPzbN0gg==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..85eeaff5543
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBujCCASMCAQEwDQYJKoZIhvcNAQEFBQAwgZIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwF
+MTBHZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3Jp
+dHkxGzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1cxcNMTMxMjA2MTUzMzUwWhcN
+MTQwMTA1MTUzMzUwWjBMMBICAQwXDTEzMTIwNjE1MjczMFowGgIJAJGUg/wuW1KD
+Fw0xMjEyMTIxODQ4MjJaMBoCCQCRlIP8LltShRcNMTIxMjEyMTg0ODUyWqAOMAww
+CgYDVR0UBAMCAQ4wDQYJKoZIhvcNAQEFBQADgYEAERPfPdQnIafo1lYbFEx2ojrb
+eYqvWN9ykTyUGq2bKv+STYiuaKUz6daGVjELjn/safn5wHkYr9+C/kRRoCor5HYw
+N3uxHnkMpl6Xn7kgXL2b0jbdvfa44faOXdH2gbhzd8bFsOMra4QJHT6CgpYb3ei1
++ePhAd1KS7tS/dyyP4c=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_expired.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..88307503240
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/crl_expired.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQwNTBaFw0x
+MzAxMTExODQwNTBaoA4wDDAKBgNVHRQEAwIBAzANBgkqhkiG9w0BAQUFAAOBgQBs
+jyvEdX8o0+PfRJsEv5oLwgp5y+YmKjRlXg2oj/ETxBDKNYtBY7B9Uu9q0chFtwTu
+XMXeEFWuxnKG+4Ovp6JmNcCKkttUwsWQuR6dGpClW6ttTk0putAWtDnqukTPlEQ2
+XU3wco7ZgrTphvuGpaIQLM1sQg9x8SfW3q6/hxYm3A==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fts.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fts.js
@@ -0,0 +1,18 @@
+
+function queryIDS( coll, search, filter, extra ){
+ var cmd = { search : search }
+ if ( filter )
+ cmd.filter = filter;
+ if ( extra )
+ Object.extend( cmd, extra );
+    lastCommandResult = coll.runCommand( "text" , cmd );
+
+    return getIDS( lastCommandResult );
+}
+
+function getIDS( commandResult ){
+ if ( ! ( commandResult && commandResult.results ) )
+ return []
+
+ return commandResult.results.map( function(z){ return z.obj._id; } )
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fun.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fun.js
new file mode 100644
index 00000000000..276f32a8f40
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/fun.js
@@ -0,0 +1,32 @@
+// General high-order functions
+
+function forEach (action, array) {
+ for (var i = 0; i < array.length; i++)
+ action (array[i]);
+}
+
+function foldl (combine, base, array) {
+ for (var i = 0; i < array.length; i++)
+ base = combine (base, array[i]);
+ return base
+}
+
+function foldr (combine, base, array) {
+ for (var i = array.length - 1; i >= 0; i--)
+ base = combine (array[i], base);
+ return base
+}
+
+function map (func, array) {
+ var result = [];
+ for (var i = 0; i < array.length; i++)
+ result.push (func (array[i]));
+ return result
+}
+
+function filter (pred, array) {
+ var result = []
+ for (var i = 0; i < array.length; i++)
+ if (pred (array[i])) result.push (array[i]);
+ return result
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/geo_near_random.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..60cb7733f5d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/geo_near_random.js
@@ -0,0 +1,99 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPoints already called");
+ this.nPts = nPts;
+
+ for (var i=0; i<nPts; i++){
+ this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ }
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPoints not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
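+
+// Typical use (sketch, following the geo_near_random suites):
+//     var test = new GeoNearRandomTest("geo_near_random_example");
+//     test.insertPts(50);
+//     test.testPt(test.mkPt());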
+
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/grid.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/grid.js
new file mode 100644
index 00000000000..3a1253d83cd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/grid.js
@@ -0,0 +1,171 @@
+// Grid infrastructure: Servers, ReplicaSets, ConfigSets, Shards, Routers (mongos). Convenient objects and functions on top of those in shell/servers.js -Tony
+
+load('jstests/libs/fun.js')
+load('jstests/libs/network.js')
+
+// New servers and routers take and increment port number from this.
+// A comment mentioning the FreshPorts monad implies reading and incrementing this; IO may also read/increment it.
+var nextPort = 31000
+
+/*** Server is the spec of a mongod, ie. all its command line options.
+ To start a server call 'begin' ***/
+// new Server :: String -> FreshPorts Server
+function Server (name) {
+ this.addr = '127.0.0.1';
+ this.dirname = name + nextPort;
+ this.args = { port : nextPort++,
+ noprealloc : '',
+ smallfiles : '',
+ rest : '',
+ oplogSize : 8 }
+}
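+
+// Typical use (sketch): var s = new Server('mongod'); var conn = s.begin();
+// run commands against conn, then s.end() to stop it and remove its dbpath.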
+
+// Server -> String <addr:port>
+Server.prototype.host = function() {
+ return this.addr + ':' + this.args.port
+}
+
+// Start a new server with this spec and return connection to it
+// Server -> IO Connection
+Server.prototype.begin = function() {
+ return startMongodTest(this.args.port, this.dirname, false, this.args);
+}
+
+// Stop server and remove db directory
+// Server -> IO ()
+Server.prototype.end = function() {
+ print('Stopping mongod on port ' + this.args.port)
+ stopMongod (this.args.port)
+ resetDbpath (MongoRunner.dataPath + this.dirname)
+}
+
+// Cut server from network so it is unreachable (but still alive)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function cutServer (conn) {
+ var addrport = parseHost (conn.host)
+ cutNetwork (addrport.port)
+}
+
+// Ensure server is connected to network (undo cutServer)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function uncutServer (conn) {
+ var iport = parseHost (conn.host)
+ restoreNetwork (iport.port)
+}
+
+// Kill server process at other end of this connection
+function killServer (conn, _signal) {
+ var signal = _signal || 15
+ var iport = parseHost (conn.host)
+ stopMongod (iport.port, signal)
+}
+
+/*** ReplicaSet is the spec of a replica set, ie. options given to ReplSetTest.
+ To start a replica set call 'begin' ***/
+// new ReplicaSet :: String -> Int -> FreshPorts ReplicaSet
+function ReplicaSet (name, numServers) {
+ this.name = name
+ this.host = '127.0.0.1'
+ this.nodes = numServers
+ this.startPort = nextPort
+ this.oplogSize = 40
+ nextPort += numServers
+}
+
+// Start a replica set with this spec and return a ReplSetTest, which holds connections to the servers, including the master. Call ReplSetTest.stopSet() to stop all servers.
+// ReplicaSet -> IO ReplSetTest
+ReplicaSet.prototype.begin = function() {
+ var rs = new ReplSetTest(this)
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+// Create a new server and add it to replica set
+// ReplSetTest -> IO Connection
+ReplSetTest.prototype.addServer = function() {
+ var conn = this.add()
+ nextPort++
+ this.reInitiate()
+ this.awaitReplication(60000)
+ assert.soon(function() {
+ var doc = conn.getDB('admin').isMaster()
+ return doc['ismaster'] || doc['secondary']
+ })
+ return conn
+}
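+
+// Illustrative sketch (hypothetical names): start a 3-node set, add a node,
+// then tear everything down:
+//   var spec = new ReplicaSet('rs0', 3)
+//   var rs = spec.begin()     // ReplSetTest with live connections
+//   var conn = rs.addServer() // grow the set to 4 members
+//   rs.stopSet()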
+
+/*** ConfigSet is a set of specs (Servers) for sharding config servers.
+ Supply either the servers or the number of servers desired.
+ To start the config servers call 'begin' ***/
+// new ConfigSet :: [Server] or Int -> FreshPorts ConfigSet
+function ConfigSet (configSvrsOrNumSvrs) {
+ if (typeof configSvrsOrNumSvrs == 'number') {
+ this.configSvrs = []
+ for (var i = 0; i < configSvrsOrNumSvrs; i++)
+ this.configSvrs.push (new Server ('config'))
+ } else
+ this.configSvrs = configSvrsOrNumSvrs // caller supplied the list of Servers directly
+}
+
+// Start config servers, return list of connections to them
+// ConfigSet -> IO [Connection]
+ConfigSet.prototype.begin = function() {
+ return map (function(s) {return s.begin()}, this.configSvrs)
+}
+
+// Stop config servers
+// ConfigSet -> IO ()
+ConfigSet.prototype.end = function() {
+ return map (function(s) {return s.end()}, this.configSvrs)
+}
+
+/*** Router is the spec for a mongos, ie, its command line options.
+ To start a router (mongos) call 'begin' ***/
+// new Router :: ConfigSet -> FreshPorts Router
+function Router (configSet) {
+ this.args = { port : nextPort++,
+ v : 0,
+ configdb : map (function(s) {return s.host()}, configSet.configSvrs) .join(','),
+ chunkSize : 1}
+}
+
+// Start router (mongos) with this spec and return connection to it.
+// Router -> IO Connection
+Router.prototype.begin = function() {
+ return startMongos (this.args);
+}
+
+// Stop router
+// Router -> IO ()
+Router.prototype.end = function() {
+ return stopMongoProgram (this.args.port)
+}
+
+// Add shard to config via router (mongos) connection. Shard is either a replica set URL (replSet.getURL()) or a single server address (server.host())
+// Connection -> String -> IO ()
+function addShard (routerConn, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({addshard: repSetOrHostName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> IO ()
+function enableSharding (routerConn, dbName) {
+ var ack = routerConn.getDB('admin').runCommand ({enablesharding: dbName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> String -> String -> IO ()
+function shardCollection (routerConn, dbName, collName, shardKey) {
+ var ack = routerConn.getDB('admin').runCommand ({shardcollection: dbName + '.' + collName, key: shardKey})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Move db from its current primary shard to given shard. Shard is either a replica set URL (replSet.getURL()) or a single server address (server.host())
+// Connection -> String -> String -> IO ()
+function moveDB (routerConn, dbname, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({moveprimary: dbname, to: repSetOrHostName})
+ printjson(ack)
+ assert (ack['ok'], tojson(ack))
+}
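+
+// Putting it together -- an illustrative sketch (hypothetical names) of
+// assembling a one-shard cluster from the pieces above:
+//   var cfg = new ConfigSet(3)
+//   cfg.begin()
+//   var router = new Router(cfg)
+//   var routerConn = router.begin()
+//   var shard = new Server('shard')
+//   shard.begin()
+//   addShard(routerConn, shard.host())
+//   enableSharding(routerConn, 'test')
+//   shardCollection(routerConn, 'test', 'foo', { a : 1 })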
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key1 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key2 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e181139b5d9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 8 (0x8)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:31:58 2013 GMT
+ Not After : Mar 23 14:31:58 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=127.0.0.1
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:aa:e1:a0:6c:09:dc:fd:d0:9f:0f:b6:77:40:60:
+ f9:01:f9:9e:55:20:fe:88:04:93:c9:ab:96:93:3a:
+ ed:7e:7d:ad:e4:eb:a7:e9:07:35:ef:6e:14:64:dd:
+ 31:9b:e5:24:06:18:bb:60:67:e3:c5:49:8e:79:b6:
+ 78:07:c1:64:3f:de:c1:7d:1b:a9:96:35:d5:f9:b8:
+ b4:5e:2a:34:b7:d0:19:ad:f6:8a:00:ef:8e:b0:d5:
+ 36:1f:66:a0:7a:7d:cf:f0:98:3c:ee:0f:be:67:d2:
+ de:c3:e6:b8:79:2f:64:40:0c:39:15:97:8c:13:da:
+ 1b:db:5c:bb:a3:43:0b:74:c7:46:55:9b:ea:d7:93:
+ d5:15:2f:d1:34:ac:a9:99:3b:01:f0:c1:d7:42:89:
+ 24:bb:ab:60:99:c1:4d:9f:bf:9a:a3:92:3a:58:05:
+ e2:47:a6:8e:71:b2:0a:32:b0:c5:cc:a0:58:40:bf:
+ 09:a7:76:f5:37:ce:90:71:e0:75:89:17:ea:fb:80:
+ 24:a1:9d:6e:1b:7e:e3:44:52:d3:fe:e3:de:80:9a:
+ 8e:c3:4f:8c:bb:b4:8c:d2:a9:a9:aa:af:90:ac:b4:
+ ee:6b:d2:c5:71:1e:08:7f:4c:b6:2a:5f:13:7a:e3:
+ 29:f7:2e:bb:f7:c5:48:0a:4e:2e:1e:d4:2c:40:b3:
+ 4c:19
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 0E:3F:54:C4:77:85:FF:93:58:A7:24:23:32:35:73:B0:BE:8C:C3:BB
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 4c:9d:31:81:b5:e9:6a:64:4c:1e:eb:91:7f:f1:66:74:46:13:
+ 19:cb:f2:3b:9a:41:f2:83:67:32:53:a6:cd:33:37:4c:92:a6:
+ 36:d4:f3:0b:56:a2:2b:66:f1:09:a7:06:36:b8:83:b7:31:70:
+ fe:bf:af:b5:3d:59:f3:f2:18:48:c7:6c:b0:90:8c:24:47:30:
+ 53:8d:c5:3e:7c:7b:33:53:15:ec:bd:8a:83:ed:05:e8:8b:21:
+ d7:65:39:69:95:c8:58:7d:4f:1b:32:51:85:2d:4d:8b:be:00:
+ 60:17:83:9b:2b:13:43:05:78:db:a4:2e:a2:cb:31:34:7e:b9:
+ 8a:72
+-----BEGIN CERTIFICATE-----
+MIIDZDCCAs2gAwIBAgIBCDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+MzE1OFoXDTQxMDMyMzE0MzE1OFowXDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjES
+MBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAquGgbAnc/dCfD7Z3QGD5AfmeVSD+iASTyauWkzrtfn2t5Oun6Qc1724UZN0x
+m+UkBhi7YGfjxUmOebZ4B8FkP97BfRupljXV+bi0Xio0t9AZrfaKAO+OsNU2H2ag
+en3P8Jg87g++Z9Lew+a4eS9kQAw5FZeME9ob21y7o0MLdMdGVZvq15PVFS/RNKyp
+mTsB8MHXQokku6tgmcFNn7+ao5I6WAXiR6aOcbIKMrDFzKBYQL8Jp3b1N86QceB1
+iRfq+4AkoZ1uG37jRFLT/uPegJqOw0+Mu7SM0qmpqq+QrLTua9LFcR4If0y2Kl8T
+euMp9y6798VICk4uHtQsQLNMGQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU
+Dj9UxHeF/5NYpyQjMjVzsL6Mw7swHwYDVR0jBBgwFoAUB0EZOp9+xbciTre81d/k
+/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEATJ0xgbXpamRMHuuRf/FmdEYTGcvyO5pB
+8oNnMlOmzTM3TJKmNtTzC1aiK2bxCacGNriDtzFw/r+vtT1Z8/IYSMdssJCMJEcw
+U43FPnx7M1MV7L2Kg+0F6Ish12U5aZXIWH1PGzJRhS1Ni74AYBeDmysTQwV426Qu
+ossxNH65inI=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCq4aBsCdz90J8P
+tndAYPkB+Z5VIP6IBJPJq5aTOu1+fa3k66fpBzXvbhRk3TGb5SQGGLtgZ+PFSY55
+tngHwWQ/3sF9G6mWNdX5uLReKjS30Bmt9ooA746w1TYfZqB6fc/wmDzuD75n0t7D
+5rh5L2RADDkVl4wT2hvbXLujQwt0x0ZVm+rXk9UVL9E0rKmZOwHwwddCiSS7q2CZ
+wU2fv5qjkjpYBeJHpo5xsgoysMXMoFhAvwmndvU3zpBx4HWJF+r7gCShnW4bfuNE
+UtP+496Amo7DT4y7tIzSqamqr5CstO5r0sVxHgh/TLYqXxN64yn3Lrv3xUgKTi4e
+1CxAs0wZAgMBAAECggEADtdh04BXzUOdTQQP/2tstRs1ATfIY4/iNhXNEiSAFAhe
+Xg+Jmdeie5UX+FqtwFh6dH0ZaRoc0jm9Qhzy99l4F4QFUhRg+kbausGsCLGpun08
+fbt36PTlc75Q4RFMxta+hKr0P8jmRKYv6tvTEdNn5ZgqLRHofKDo4nh/Y4KjMBUq
+VIMUu+VO9Ol2GPlZVRBaJec0E1+HUyzaK5JVUIFh4atcrHyXxae+rY9o6G57BBEj
+ZzlahfMI5aYj9HhXnB8RuhVBuIZBNSA41nxHmOs6JBQsatVML51RFIV4KPU+AyDR
+bdYXHJehRIUF8RL92aHjGYsvXdSxVhuUBqMIQhOwAQKBgQDUtj+p+7SHpLyQIZpU
+EQFK+42LDc6zF4uJVjq1d8fC2Hrmz8PLs0KcH36VWNbo48B3iFiPWIMID5xwLuIb
+FkLOzJ8QrbILn0zcu/hplrCiy6PZas3rpLJ+X406wLQeCikOLhQkz+cuKuQmvWkK
+eyqwBIIxg8t5dTtTAmu3w/DDgQKBgQDNqByxKduTgEND1+isUOt+L/ipR3SzXQ4m
+ZsOKiSxyXxge0/CUxPxO6WeEVGQ7bGAr5yQD9ukvJnCo3phYcuRRj+RTMrTL73Kz
+p/cyOUx2NMUIgURTsO+s3D0lC4+NmoDge0roeEDX+/lFNjqgRKJ+1LUimqbo5uNE
+EupkyTh0mQKBgGw/81ZGSjFdnLic4TU3Ejlem0HQ3Qg3S0OxJl+DfZ2jHaiowzO/
+Hn7laD4I4BXVEfXC5Y7NtKE9kJdmxJqUUZt8dta+DoXro+oRnvHdRjcS+2eB+xmY
+z12QswbbWs6OzSXyPT4er7/HBCTS78nttGOvZ7JbKAm/p1kvOjJi/PwBAoGAE7Tw
+Sum/6Lp5t56Q5TI73rOqGE6ImEdqe7ONOVE7uRnzrcCRZTAbHVSwXrXXhPo1nP9h
+LCAU6De+w+/QmWkpB8fKEU7ilEg1rZGC1oU3FnyoBNCeQ4bI8L+J/GrHLsKHZvtp
+ii07yXaTxFYV+BWbnJu1X8OCCv9U98j4PQArMMECgYEAm6uLN647vb+ZhzNBMtsX
+1wnMSgzbgGpgjhWwk6dNmw8YJNKg9CFa8sQ8N7yKXWBEF/RkU0kfzZL8iddHEb/k
+Ti1BlwrEzFfIQLlBfv47tYWOj8ZxN0ujlzUoN2VAC25LZhjcQCo3ftBk2lkrmllu
+MxjxBfRk/teUdRl80oi5R0w=
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..beb0bb91b61
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,100 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 9 (0x9)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:45:13 2013 GMT
+ Not After : Mar 23 14:45:13 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=santesthostname.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:c9:83:7a:75:42:cf:35:a4:95:c7:c8:d8:4d:19:
+ 0e:89:87:d5:bd:f9:2f:ee:20:2c:4c:ca:6d:0b:c1:
+ 10:5b:06:1b:c4:a1:26:12:25:06:7a:1e:d1:e6:d0:
+ 91:2b:a3:c8:74:de:95:10:d9:ff:20:03:ec:84:db:
+ 49:d9:a4:e9:c2:93:f0:d2:32:01:a6:55:db:14:bf:
+ 16:fe:88:e0:e4:46:0f:6a:bd:27:95:45:2e:8d:13:
+ e2:99:09:74:e4:2b:32:c3:6d:61:0c:86:85:eb:12:
+ f5:dc:9e:7b:d3:00:a3:ce:f4:8a:4b:51:7f:a2:c6:
+ 0b:52:a4:f1:41:d5:01:53:88:99:b9:3b:29:f8:43:
+ 5e:a4:c7:41:d9:d3:34:43:f2:c7:a6:8d:22:1c:f9:
+ b2:63:cb:df:83:9c:6f:ec:e3:b0:63:af:0b:51:c9:
+ 20:ca:c2:59:c1:2c:ec:de:37:18:76:3d:73:85:82:
+ 12:11:cd:b6:ef:2f:7b:64:cd:a3:2d:f6:7a:54:7f:
+ b3:4f:c9:38:f4:62:b6:da:00:f0:59:df:e1:d3:15:
+ ca:4b:73:6c:22:c1:9a:c1:51:c4:28:59:0f:71:2a:
+ 39:e9:17:08:9d:b0:88:61:a7:53:67:da:dc:fb:6e:
+ 38:f7:a8:cd:cd:88:ed:d9:4c:88:f4:a4:75:5e:3f:
+ 8b:ff
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ OpenSSL Certificate for SSL Server
+ X509v3 Subject Alternative Name:
+ DNS:*.example.com, DNS:127.0.0.1, DNS:morefun!, IP Address:154.2.2.3, email:user@host.com
+ Signature Algorithm: sha1WithRSAEncryption
+ 0b:82:c6:7d:e0:ba:71:24:d6:a8:f4:cb:6f:0f:f6:69:28:32:
+ 98:81:e6:14:49:81:07:ff:92:dd:0a:a4:68:3c:92:00:e5:8c:
+ 43:d1:29:04:4a:5e:f2:b1:db:d2:ca:5d:7d:fc:fe:7b:f5:01:
+ 65:87:25:cd:4c:68:09:16:bd:c7:b0:a4:d2:89:5e:dd:92:44:
+ 6c:6e:7a:fe:7e:05:e2:2b:56:96:96:16:44:4a:01:87:8f:0c:
+ df:35:88:97:3e:e5:21:23:a2:af:87:ad:ee:f7:9e:05:36:f7:
+ 96:88:c8:fa:92:33:c2:60:2e:14:d9:ea:34:ab:04:a6:78:04:
+ be:da
+-----BEGIN CERTIFICATE-----
+MIIDjDCCAvWgAwIBAgIBCTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+NDUxM1oXDTQxMDMyMzE0NDUxM1owZjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEc
+MBoGA1UEAwwTc2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMmDenVCzzWklcfI2E0ZDomH1b35L+4gLEzKbQvBEFsGG8Sh
+JhIlBnoe0ebQkSujyHTelRDZ/yAD7ITbSdmk6cKT8NIyAaZV2xS/Fv6I4ORGD2q9
+J5VFLo0T4pkJdOQrMsNtYQyGhesS9dyee9MAo870iktRf6LGC1Kk8UHVAVOImbk7
+KfhDXqTHQdnTNEPyx6aNIhz5smPL34Ocb+zjsGOvC1HJIMrCWcEs7N43GHY9c4WC
+EhHNtu8ve2TNoy32elR/s0/JOPRittoA8Fnf4dMVyktzbCLBmsFRxChZD3EqOekX
+CJ2wiGGnU2fa3PtuOPeozc2I7dlMiPSkdV4/i/8CAwEAAaOBmDCBlTAJBgNVHRME
+AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiT3BlblNTTCBD
+ZXJ0aWZpY2F0ZSBmb3IgU1NMIFNlcnZlcjBCBgNVHREEOzA5gg0qLmV4YW1wbGUu
+Y29tggkxMjcuMC4wLjGCCG1vcmVmdW4hhwSaAgIDgQ11c2VyQGhvc3QuY29tMA0G
+CSqGSIb3DQEBBQUAA4GBAAuCxn3gunEk1qj0y28P9mkoMpiB5hRJgQf/kt0KpGg8
+kgDljEPRKQRKXvKx29LKXX38/nv1AWWHJc1MaAkWvcewpNKJXt2SRGxuev5+BeIr
+VpaWFkRKAYePDN81iJc+5SEjoq+Hre73ngU295aIyPqSM8JgLhTZ6jSrBKZ4BL7a
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJg3p1Qs81pJXH
+yNhNGQ6Jh9W9+S/uICxMym0LwRBbBhvEoSYSJQZ6HtHm0JEro8h03pUQ2f8gA+yE
+20nZpOnCk/DSMgGmVdsUvxb+iODkRg9qvSeVRS6NE+KZCXTkKzLDbWEMhoXrEvXc
+nnvTAKPO9IpLUX+ixgtSpPFB1QFTiJm5Oyn4Q16kx0HZ0zRD8semjSIc+bJjy9+D
+nG/s47BjrwtRySDKwlnBLOzeNxh2PXOFghIRzbbvL3tkzaMt9npUf7NPyTj0Yrba
+APBZ3+HTFcpLc2wiwZrBUcQoWQ9xKjnpFwidsIhhp1Nn2tz7bjj3qM3NiO3ZTIj0
+pHVeP4v/AgMBAAECggEAbaQ12ttQ9rToMd2bosdBW58mssiERaIHuHhjQIP5LC10
+qlWr6y9uCMAAIP/WHNJuXPhGTvbtkzPPWrIdymeqMI5h91vx/di07OLT1gYPpuRf
+uwnUIamUnHn3TqEQkpzWb/JxXWlMMA0O7MzmPnYYqp/vJu/e7Geo/Xx1MAZ/RD0U
+YUvrjAyHcor01VVa/eV69jL+6x9ExFNmRYRbmjmK/f10R4o86nIfqhXbM8qKsT6x
+1U/S2I4oModm0x12PgiMDMDzVD+cNE/h8lSnFtBTNEY3xRe7CZnhMV4nBVGjWi9D
+XjcIBA0kGd4G10ploiF+37J/PQbyodLA/Y30BIYCkQKBgQD6XvEzd4DbBa08pcCa
+CYZd5pyAHur1GzJ4rTQNqB84hzuyG6dKkk0rPXjExrj/GAtGWg2ohggmC5OPInKM
+WdpMC56Q0aZYMId3Be/Wg4kRgFO0YOsrx0dRVi5nwbRXkMjXbfewSopwbzP5hIo1
+7rfOhdhbjXx6W269FPE4Epmj1QKBgQDOC1QjGeEzwEgSq3LuojRLHFo31pWYr7UU
+sxhpoWMB6ImPMVjXaEsRKfc7Gulpee1KVQLVmzbkqrHArVNXEpuG4egRwZ10UJ0L
+v4PqrElyHKxgAvllflkkMSX4rx791T+AZMq6W5VX1fKiojfvSLzmEFaI6VmS43GZ
+KCz9RFbegwKBgHSE4vP01b8YsTrcWPpXHHVu8b6epPJVKfQHh4YjjAQey6VkQULv
+O4K4JRBO+6GcawLeviSD3B74nD+s5Gp1Fqb1cWIsb6HzU9gMp0XKCWxfsJTt1gSV
+xZcQ6J/ZAjkOZKn9v5wH1M3msuWYzUm0Q06V888H1bqL+sl8iZZy8ZXRAoGBALf6
+GZh2BUYGTNSOzkMSBouCt3PgYRdC3PesqwG2nwcXMazwLRm6AD1FMYJPF1edDSow
+GiXNQAiR+cHHggDflourr2IbdZJkYLYavZmPWM1RmQDp5vKfDM1qLTOOeqe//8GP
+Pg2EtScG3G4nVraMRk9PC1WYtuiXudk9rF5A5SgtAoGBAL1oVSnQpi5tzBNJqhzM
+mQIF7ct5WNj2b1lKqqsXUTd2pcgMCRrryatqH+gLz1rAjtbVfx2FAYkutH5TFgqP
+c4uomUH3so1EjEA8GtFS9SSkLn5nIr4TnVy4+Qsr1svOo8mhtztORXz+xOTxR6ud
+p7rd/YEbc5GhNSXlcW+apZW+
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockkrb5.conf b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockservice.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockuser.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/network.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/network.js
new file mode 100644
index 00000000000..e5b33f3219e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/network.js
@@ -0,0 +1,37 @@
+
+// Parse "127.0.0.1:300" into {addr: "127.0.0.1", port: 300},
+// and "127.0.0.1" into {addr: "127.0.0.1", port: undefined}
+function parseHost (hostString) {
+ var items = hostString.match(/(\d+\.\d+\.\d+\.\d+)(:(\d+))?/)
+ return {addr: items[1], port: parseInt(items[3])}
+}
+
+
+/* Network traffic shaping (packet dropping) to simulate network problems
+ Currently works on BSD Unix and Mac OS X only (using ipfw).
+ Requires sudo access.
+ TODO: make it work on Linux too (using iptables). */
+
+var nextRuleNum = 100 // rule numbers grow monotonically and must stay below 65535, so these routines can't be called indefinitely
+var portRuleNum = {}
+
+// Cut network connection to local port by dropping packets using ipfw
+function cutNetwork (port) {
+ portRuleNum[port] = nextRuleNum
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any to any ' + port)
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any ' + port + ' to any')
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
+
+// Restore network connection to local port by deleting the ipfw rules added above
+function restoreNetwork (port) {
+ var ruleNum = portRuleNum[port]
+ if (ruleNum) {
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum++)
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum)
+ delete portRuleNum[port]
+ }
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
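+
+// Illustrative sketch (needs sudo and ipfw, per the note above):
+//   cutNetwork(31000)     // drop all TCP traffic to/from port 31000
+//   // ... exercise the failure path ...
+//   restoreNetwork(31000) // delete the two rules added above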
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/parallelTester.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..d5cb5346abe
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/parallelTester.js
@@ -0,0 +1,259 @@
+/**
+ * The ParallelTester class is used to run more than one test concurrently
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
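+
+ // Illustrative sketch: build a small workload whose inter-event delays
+ // are exponentially distributed with mean 1000ms, then dispatch it:
+ //   var gen = new EventGenerator("me", "coll", 1000)
+ //   gen.addInsert({ _id : 1 })
+ //   gen.addCheckCount(1, { _id : 1 })
+ //   gen.addRemove({ _id : 1 })
+ //   EventGenerator.dispatch.apply(null, gen.getEvents())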
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
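+
+ // Illustrative sketch: run two independent functions in parallel threads
+ // (each thread opens its own connection):
+ //   var pt = new ParallelTester()
+ //   pt.add(function(host) { new Mongo(host).getDB("test").a.insert({ x : 1 }) },
+ //          [db.getMongo().host])
+ //   pt.add(function(host) { new Mongo(host).getDB("test").b.insert({ x : 1 }) },
+ //          [db.getMongo().host])
+ //   pt.run("parallel inserts failed")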
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js",// log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\\.js)")[1] in skipTests ) ||
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/password_protected.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..87976e7a574
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIgWTIkEmBBfoCAggA
+MBQGCCqGSIb3DQMHBAjzL6xrCrEygwSCBMihG8kg3nTnTtWAbB+d1D+HJxriqm37
+7rwjkfa+T5w5ZBRGpsTt3QB5ep0maX72H55ns6ukkeMoDBSadhDWrGWcLQ2IOGt3
+E14KU6vMFe3gQkfF1fupp7F+3ma58/VNUKa4X5pzZ7OCf8inlLWejp8BRqbrPWqw
+Errgw1kNN3gWfQMr7JtIt1yI1xIMEB2Z976Jn0gaGnJAtzIW4thqjkDdb8b33S9f
+cb7N1Fq4cly22f9HdqNcLgVTi1zIlPXc/f/6mtsGTsJv/rMPthJ7c3Smvh3Fce2G
+w8e+ypfey+9QG3fk7RslaFRe8ShgqfdR8CAalp2UzwNbX91Agyuim3TA6s4jM8N9
+cF6CXlqEaA4sKhiOJmw69DfTC7QRee/gi2A8bz17pX85nKrGiLYn+Od8CEhTFxVk
+lNgBLv4+RcYHVqxWlbJMdDliMN53E+hYbh0y+GDLjteEXbrxRo1aSgd/9PGiSl97
+KY4F7b/OwRzRZh1F+cXY+uP5ZQMbx5EMMkhzuj3Hiy/AVlQrW2B1lXtcf11YFFJj
+xWq6YcpmEjL+xRq1PgoU7ahl6K0A3ScedQA5b1rLdPE8+bkRAfoN+0r8HVkIL7M+
+PorrwuWnvUmovZ0yDvm153HVvRnKZKHcelklphuUWfXvcRNITG/Rx6ssj+MVjqjb
+Xy7t7wgIrk10TFWNEcunGjSSjPDkjYPazJ2dasI0rODzhlQzrnlWM+El9P5zSu2z
+1Bvet44nmAKi2WLMda5YKbJcLSNbpBFB+rTwDt/D+dfwsJeC0sjpzzatKGXNJLJQ
+7x9BZfAbBn0QrIZYGMkaxWvcpJcaVUbCKiST4DK5ze584ptrlH+Bqw4u4xLcVrdk
+hu/8IBNybLrl4zahIz7bRRNmw5wo9zUVXPXEtuYak+MK+gmD3TzJ12OUKAlAj3Go
+Fj3NFQoxBJJjuXM3zZRvHp+/AAOUANBYIyV2WssF6C+SH4o+jKyxWC/GawPFvx/B
+gy55kdEt+ORdcOfV8L5Q2xI8Qpck6E3odmaHCvjz1bUVUWqhJcTuoewHRBfWiWgc
+UCXBS/YgendUQroBOPyYIwTtk4XY9fhhKGI4LhWcx4LfzntBnM9FGmDOwhu3HqEd
+HOs8p+HhB8LPjGRot63m7gkJ1T6AswSi9hTeZeSgXuSgL23zqwPGbGTwO3AmFs/M
+8luXQ4My9bk74K3d9lFdJPaxeTpeeWNodnBItbioT5aImptU+pkKWLTVmXi4V+JE
+1ootg+DSbz+bKp4A/LLOBO4Rsx5FCGAbBMnKc/n8lF86LjKq2PLRfgdPCaVfBrcd
+TnOkBZYU0HwJAc++4AZQJvA/KRB4UPUzMe2atjVxcrr6r6vL8G04+7TBFoynpzJ+
+4KZPCJz0Avb4wYKu/IHkdKL7UY8WEGz1mMDbAu4/xCriLg49D2f1eY3FTEjBotBI
+J9hE4ccmwqlxtl4qCVRezh0C+viJ6q2tCji2SPQviaVMNWiis9cZ52J+F9TC2p9R
+PdatJg0rjuVzfoPFE8Rq8V6+zf818b19vQ4F31J+VXTz7sF8it9IO0w/3MbtfBNE
+pKmMZ9h5RdSw1kXRWXbROR9XItS7gE1wkXAxw11z7jqNSNvhotkJXH/A5qGpTFBl
+Z8A=
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDczCCAtygAwIBAgIBCzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MTgxMFoXDTQxMDQyMjE1MTgxMFowazELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRAwDgYDVQQDDAdsYXphcnVzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA0+uq+UcogTSS+BLNTwwsBU7/HnNNhNgLKnk8pdUC
+UFOzAjXnXlXEravmbhWeIj5TsCElc5FPE66OvmiixFU6l27Z5P8gopjokxll7e1B
+ujeJOXgy5h+K76xdeQ90JmQX4OO0K5rLXvNH3ufuhGr2NObrBz6kbF5Wdr3urPl6
+pFSLH02zPLqPHhhUvO8jcbUD3RrS/5ZGHqE++F+QRMuYeCXTjECA8iLDvQsiqvT6
+qK1y04V/8K0BYJd/yE31H3cvRLUu7mRAkN87lY1Aj0i3dKM/l2RAa3tsy2/kSDH3
+VeUaqjoPN8PTfJaoMZz7xV7C+Zha+JZh3E7pq6viMR6bkwIDAQABo3sweTAJBgNV
+HRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZp
+Y2F0ZTAdBgNVHQ4EFgQUbw3OWXLJpkDMpGnLWM4vxSbwUSAwHwYDVR0jBBgwFoAU
+B0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAL+OC9x0P7Ql+
+8NbONrIeOIoJD++K5rUM0vI+u9RDAxTm9TO6cP7Cl6H4zzvlzJ3w9DL66c2r+ZTy
+BxzFO1wtDKUo5RJKneC0tMz0rJQIWTqo45fDLs8UIDB5t4xp6zed34nvct+wIRaV
+hCjHBaVmILlBWb6OF9/kl1JhLtElyDs=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/server.pem
new file mode 100644
index 00000000000..e5980d4856e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/server.pem
@@ -0,0 +1,34 @@
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAK53miP9GczBWXnq
+NxHwQkgVqsDuesjwJbWilMK4gf3fjnf2PN3qDpnGbZbPD0ij8975pIKtSPoDycFm
+A8Mogip0yU2Lv2lL56CWthSBftOFDL2CWIsmuuURFXZPiVLtLytfI9oLASZFlywW
+Cs83qEDTvdW8VoVhVsxV1JFDnpXLAgMBAAECgYBoGBgxrMt97UazhNkCrPT/CV5t
+6lv8E7yMGMrlOyzkCkR4ssQyK3o2qbutJTGbR6czvIM5LKbD9Qqlh3ZrNHokWmTR
+VQQpJxt8HwP5boQvwRHg9+KSGr4JvRko1qxFs9C7Bzjt4r9VxdjhwZPdy0McGI/z
+yPXyQHjqBayrHV1EwQJBANorfCKeIxLhH3LAeUZuRS8ACldJ2N1kL6Ov43/v+0S/
+OprQeBTODuTds3sv7FCT1aYDTOe6JLNOwN2i4YVOMBsCQQDMuCozrwqftD17D06P
+9+lRXUekY5kFBs5j28Xnl8t8jnuxsXtQUTru660LD0QrmDNSauhpEmlpJknicnGt
+hmwRAkEA12MI6bBPlir0/jgxQqxI1w7mJqj8Vg27zpEuO7dzzLoyJHddpcSNBbwu
+npaAakiZK42klj26T9+XHvjYRuAbMwJBAJ5WnwWEkGH/pUHGEAyYQdSVojDKe/MA
+Vae0tzguFswK5C8GyArSGRPsItYYA7D4MlG/sGx8Oh2C6MiFndkJzBECQDcP1y4r
+Qsek151t1zArLKH4gG5dQAeZ0Lc2VeC4nLMUqVwrHcZDdd1RzLlSaH3j1MekFVfT
+6v6rrcNLEVbeuk4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBCjANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNTEz
+MjU0MFoXDTQxMDQyMTEzMjU0MFowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBAK53miP9GczBWXnqNxHwQkgVqsDuesjwJbWilMK4gf3fjnf2
+PN3qDpnGbZbPD0ij8975pIKtSPoDycFmA8Mogip0yU2Lv2lL56CWthSBftOFDL2C
+WIsmuuURFXZPiVLtLytfI9oLASZFlywWCs83qEDTvdW8VoVhVsxV1JFDnpXLAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBQgCkKiZhUV9/Zo7RwYYwm2cNK6tzAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCbsfr+Q4pty4Fy38lSxoCgnbB4pX6+Ex3xyw5zxDYR3xUlb/uHBiNZ1dBrXBxU
+ekU8dEvf+hx4iRDSW/C5N6BGnBBhCHcrPabo2bEEWKVsbUC3xchTB5rNGkvnMt9t
+G9ol7vanuzjL3S8/2PB33OshkBH570CxqqPflQbdjwt9dg==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..1e2c7391cb1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed succesfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/smoke.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..0f6deb368c5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/smoke.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDLSU04xAL7eZ/Y
+J3euMDP/Uq7+a65zEIk7wzD2K5Htosbdysn67l8OzVlF2/IcB0/2SLuHHyC7+4pv
+O2+ndtvi6hr9zF4S8Bz0In/UUb+WzhFHuZd0YLl2arhnYMoDUkyLheVqEcDbECgi
+a6i5SNpAff2eUy29FVGwsaUl7+iEHqYxS9Ibmw1CeQYLEOGyhkTI9BjfO/3HwQyW
+FmOJp/IAJUFRCXTgluaMHptaonX5GmRK64wlF8Reu+uyQRdWM0cK9b3AxbBWAAyT
+SLQto+PW1J7QQ95Kn+aJ8nH1Jj80iUAjx2yAGchl1wfSHf5yAAo4OJNXgKUrQHIs
+dofsw/KTAgMBAAECggEBAItF+SX/BJwNw7lvsMsiMz2mBEZCuA4VMjBDlnPRffT1
+JJInsSG91lppzdPS0JjrWZk+U1xLsz2XJEz4x5JQGG3qPfvL3FfVMcEBMdrg9wX2
+wFgHiwAslGPQ0e3hngWQiOi+H2MALsTm2NhcMghfJUgyCWRDUH7O8FzCGIdZSk/Z
+Bx4CvBad+k+OFvUt03gwGtoCn7XneMRVGt04EU/srg0h6C3810k7+OLC1xZc8jaE
+5UAZwKO4pqJn/w0s9T2eAC+b+1YNuUTLvMTdhfH6ZkANxgcfQHWok14iGxCyXMeQ
+dBHeyNTIYKnfpwjFz85LgEvl4gsUTaa/IM0DfGPDOkECgYEA5z8Px0Sh0DSRr6PW
+3Ki9sDtJP5f+x0ARaebOfkscOJ5YvDejIxVNVBi5PYRtfCyLT78AKpRfxtBDQtW1
+w02xqkh/RR/GZm8hLyh/KzroTA3+GQvMqnE1irkJCKEOWwUjZNAFt+kgZIQWCfbn
+V1CjeK9xnEt00Icn7sh1CKubvakCgYEA4QwKZ2zj10i90NqlAAJlj6NTK/h+bHHw
+6VkUUO93GJZ1cC++dVZRhPTqBRdACJSey4nCMFdO3PLwy2gBG9LwU4rcN0Euo2bm
+J2uBBJVoXySE1250vem9I7KAramtTzQuHtIEvYhB3DHY+oYv4Eg6NSB4zAdtDKiV
+iiP23IN0+9sCgYA0KHconQRab+EEWtIVx0GxxE2LOH9Q9dR3rIWa2tossxqUqX/0
+Y9OjSkhN5dbEEVAC1rP05q6Lq2Hga0+qE5YlMGD0eGxJons7pci5OXo33VgY0h6B
+uzM2bPHqrlkMkqYfEQSZLM4PnfNSoAwiF6Anknrvo91fQ3zwUOqE4CAqsQKBgGX2
+a5xShKRcy8ud1JY9f8BlkmBgtP7zXOCMwJyu8nnMaacLqrJFCqg/wuvNjfCVTaEQ
+aFA4rn2DAMBX/fCaUNK5Hm9WdAgKrgp8Nbda7i/1Ps7Qt8n35f8PeCe2sdQp4x+J
+riYlXxmh6BoRxA1NDDpX3QMr9id/FknBY66jTNRzAoGBALab2GqBYInkmPj1nGDA
+f9+VQWFzl98k0PbLQcvKgbWuxLDf/Pz9lBi9tPzhNuTRt9RLuCMc5ZbpPbHPNWI0
+6+zofHTHoW0+prDdtZqpEE/TKmr8emjYMf4CBIKwW3CwbBRLr9C8G01ClTaan2Ge
+LMUhIseBsaQhmkL8n1AyauGL
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDlzCCAn+gAwIBAgIJAJDxQ4ilLvoVMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV
+BAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAM
+BgNVBAoMBTEwZ2VuMR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTAeFw0x
+MjEyMDQxNTA0MDJaFw0xODA1MjcxNTA0MDJaMGIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAMBgNVBAoMBTEwZ2Vu
+MR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMtJTTjEAvt5n9gnd64wM/9Srv5rrnMQiTvDMPYrke2i
+xt3KyfruXw7NWUXb8hwHT/ZIu4cfILv7im87b6d22+LqGv3MXhLwHPQif9RRv5bO
+EUe5l3RguXZquGdgygNSTIuF5WoRwNsQKCJrqLlI2kB9/Z5TLb0VUbCxpSXv6IQe
+pjFL0hubDUJ5BgsQ4bKGRMj0GN87/cfBDJYWY4mn8gAlQVEJdOCW5owem1qidfka
+ZErrjCUXxF6767JBF1YzRwr1vcDFsFYADJNItC2j49bUntBD3kqf5onycfUmPzSJ
+QCPHbIAZyGXXB9Id/nIACjg4k1eApStAcix2h+zD8pMCAwEAAaNQME4wHQYDVR0O
+BBYEFO6qoBUb1CN4lCkGhaatcjUBKwWmMB8GA1UdIwQYMBaAFO6qoBUb1CN4lCkG
+haatcjUBKwWmMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAGcJdsiI
+JdhJDPkZksOhHZUMMRHLHfWubMGAvuml6hs+SL850DRc+vRP43eF/yz+WbEydkFz
+3qXkQQSG8A2bLOtg0c6Gyi5snUOX0CKcOl3jitgwVkHcdX/v6vbiwALk+r8kJExv
+vpiWIp3nxgLtYVJP/XPoEomEwmu5zWaw28MWXM4XrEjPYmK5ZL16VXXD+lfO0cnT
+2vjkbNK8g7fKaIYYX+cr8GLZi19kO+jUYfhtxQbn8nxUfSjHseAy9BbOLUbGTdAV
+MbGRQveOnFW0eDLjiZffwqCtn91EtYy+vBuYHT/C7Ws4hNwd9lTvmg0SHAm01vi1
+b4fBFFjNvg1wCrU=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/test_background_ops.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..91f50aaa362
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities related to background operations while other operations are working
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
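+
+// Illustrative sketch: guard a critical section shared with a background op
+// (assumes 'mongo' is a live connection):
+//   var lock = waitForLock( mongo, "myCriticalSection" )
+//   try { /* ... exclusive work ... */ } finally { lock.unlock() }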
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw "Error in parallel ops " + procName + " : "
+ + tojson( result.err )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
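+
+// Illustrative sketch (hypothetical collection): run moveOps (defined below)
+// in the background while the main test proceeds, then join:
+//   var join = startParallelOps( db.getMongo(), moveOps, [ "test.foo", {} ] )
+//   // ... main test workload ...
+//   var result = join()   // signals the op to finish and waits for it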
+
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * (max - min) ) // max is exclusive
+ }
+
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/testconfig b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/testconfig
new file mode 100644
index 00000000000..0c1fc871d61
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/testconfig
@@ -0,0 +1,4 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On error inserting documents, traces back and shows where the document was dropped
+//
+
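+// Illustrative sketch (hypothetical names/values): trace the oplog entries
+// that touched a document which went missing from a sharded collection:
+//   var ops = traceMissingDoc('test.foo', { _id : 42, a : 7 }, mongosConn);
+//   // ops come back sorted by timestamp; a trailing 'd' op shows the delete
+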
+function traceMissingDoc( coll, doc, mongos ) {
+
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+} \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/use_extended_timeout.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/use_extended_timeout.js
new file mode 100644
index 00000000000..7f770249214
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/libs/use_extended_timeout.js
@@ -0,0 +1,12 @@
+var _orig_runMongoProgram = runMongoProgram;
+runMongoProgram = function() {
+ var args = [];
+ for (var i in arguments) {
+ args[i] = arguments[i];
+ }
+ var progName = args[0];
+ if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
+ args.push("--dialTimeout", "30");
+ }
+ return _orig_runMongoProgram.apply(null, args);
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/misc/biginsert.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/misc/biginsert.js
new file mode 100755
index 00000000000..ebbdc18ba3e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/misc/biginsert.js
@@ -0,0 +1,18 @@
+o = "xxxxxxxxxxxxxxxxxxx";
+o = o + o;
+o = o + o;
+o = o + o;
+o = o + o;
+o = o + o;
+
+var B = 40000;
+var last = new Date();
+for (i = 0; i < 30000000; i++) {
+ db.foo.insert({ o: o });
+ if (i % B == 0) {
+ var n = new Date();
+ print(i);
+ print("per sec: " + B*1000 / (n - last));
+ last = n;
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/replsets/rslib.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..6a16db232e4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getPrimary().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
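+
+// Illustrative sketch (assumes a live ReplSetTest 'rs'): bump a member's
+// priority and wait for all members to settle:
+//   var config = rs.getReplSetConfig();
+//   config.version++;
+//   config.members[0].priority = 2;
+//   reconfig(rs, config);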
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csv1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csv1.js
new file mode 100644
index 00000000000..5eb7ab0249a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csv1.js
@@ -0,0 +1,42 @@
+// csv1.js
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( tojson( base ) , tojson(x) , "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+            // Fields containing arrays or objects were exported as JSON -
+            // parse the JSON in the output and verify that it matches the
+            // original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport2.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..3e0dd2c6829
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// therefore this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop() \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvimport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..3bff1110cbe
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
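+
+// A small check (illustrative, beyond what the test asserts) that the
+// doubled-quote convention used in data/csvimport1.csv decodes as expected:
+// inside a quoted field, "" denotes one literal quote character.
+function csvUnquote(cell) {
+    if (cell.length >= 2 && cell.charAt(0) === '"' && cell.charAt(cell.length - 1) === '"') {
+        return cell.slice(1, -1).replace(/""/g, '"');
+    }
+    return cell;
+}
+assert.eq('""', csvUnquote('""""""')); // row 5, column b of the data file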
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/a.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpauth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..7f5e39cc1da
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpauth.js
@@ -0,0 +1,27 @@
+// dumpauth.js
+// test mongodump with authentication
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+adminDB = m.getDB( "admin" );
+
+t = m.getDB( baseName );
+t.dropDatabase();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+adminDB.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+
+assert( adminDB.auth( "testuser" , "testuser" ) , "auth failed" );
+
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpfilename1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..fbe24551929
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpfilename1.js
@@ -0,0 +1,14 @@
+//dumpfilename1.js
+
+// Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+c.getCollection("df/").insert({ a: 3 })
+assert(c.getCollection("df/").count() > 0) // check write worked
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..fd1e8789ea6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore1.js
@@ -0,0 +1,23 @@
+// dumprestore1.js
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore10.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..49f008ea591
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore10.js
@@ -0,0 +1,63 @@
+// simple test to ensure write concern functions as expected
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
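+
+// A follow-up check this test does not make (a sketch, assuming the usual
+// shell helpers): since --writeConcern 2 waits for both members, the
+// secondary should already hold every document once mongorestore exits.
+var slave = replTest.liveNodes.slaves[0];
+slave.setSlaveOk();
+assert.eq(total, slave.getDB("foo").getCollection("bar").count(),
+          "secondary should hold all restored docs");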
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..f1e5941cbd0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore3.js
@@ -0,0 +1,60 @@
+// dumprestore3.js
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, 1, "mongorestore should exit w/ -1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore4.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..568e196061f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore4.js
@@ -0,0 +1,42 @@
+// dumprestore4.js -- see SERVER-2186
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2 = db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore6.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..d8b349e9589
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore7.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..a71725f434b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore7.js
@@ -0,0 +1,66 @@
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = 9;
+x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore8.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..4e6591738d6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore8.js
@@ -0,0 +1,105 @@
+// dumprestore8.js
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
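+// 5 index entries expected: _id, a_1 and { b : 1, _id : -1 } on foo, plus _id and x_1 on bar.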
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" );
+
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" );
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore9.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..4bbb2fc18b1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, {chunksize:1} );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db sort alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+} \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..d6b87ffe70c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,107 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
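+
+// What --noOptionsRestore means in practice: the collection is recreated
+// with default options, so the capped source comes back as a plain
+// collection - hence the stats().capped and exists().options checks above.
+// Restoring the same dump without the flag would instead preserve
+// { capped: true, size: 4096, autoIndexId: true }.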
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options, "restore options not ignored");
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert( undefined === db.capped.exists().options );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..f99b5d0405c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,35 @@
+// dumprestore_auth.js
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern", "0"); // Should fail
+assert.eq(0 , c.count() , "after restore without auth");
+
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+assert.eq(3, adminDB.system.users.count());
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth2.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..fd7d9a034d3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,96 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..b87418ed176
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,199 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
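+
+// Illustrative expansion (an assumption about how MongoRunner.runMongoTool
+// renders its options, consistent with this file's usage): an empty-string
+// value stands for a bare flag, so
+//   runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""})
+// runs: mongodump --host <mongod.host> --out <dumpDir> --db foo --dumpDbUsersAndRoles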
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod);
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpsecondary.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..7a641542498
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/dumpsecondary.js
@@ -0,0 +1,38 @@
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..a7a7bcee90c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport1.js
@@ -0,0 +1,66 @@
+// exportimport1.js
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined should round-trip as { "$undefined" : true }, but it comes back as null; accepting null is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined should round-trip as { "$undefined" : true }, but it comes back as null; accepting null is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..f18ba6cbd4b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport3.js
@@ -0,0 +1,27 @@
+// exportimport3.js
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport4.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport4.js
@@ -0,0 +1,57 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport5.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport5.js
@@ -0,0 +1,82 @@
+// exportimport5.js
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport6.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a01d49a9c8b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport6.js
@@ -0,0 +1,26 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1})
+c.save({a:1, b:2})
+c.save({a:2, b:3})
+c.save({a:2, b:3})
+c.save({a:3, b:4})
+c.save({a:3, b:5})
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
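+
+// Why b must equal 5 below: under sort {a:1, b:-1} the six docs order as
+// (1,2),(1,1),(2,3),(2,3),(3,5),(3,4); skipping 4 and limiting to 1
+// exports exactly the fifth doc, {a:3, b:5}.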
+
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, printjson(c.findOne()));
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_bigarray.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..43a209b8453
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,62 @@
+// Test importing a collection exported as a single-line JSON array that exceeds the maximum BSON document size
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
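+// Worked numbers (approximate): bigString is 1024 commas, so each doc is
+// ~1 KB of string plus a little BSON overhead and numDocs lands just under
+// 20,000 - far past what fits in a single 16 MB document once the
+// collection is exported as one JSON array.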
+
+print('Size of one document: ' + docSize)
+print('Number of documents to exceed maximum BSON size: ' + numDocs)
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+for (i = 0; i < numDocs; ++i) {
+ src.insert({ x : bigString });
+}
+var lastError = exportimport_db.getLastError();
+if (lastError == null) {
+ print('Finished inserting ' + numDocs + ' documents');
+}
+else {
+ doassert('Insertion failed: ' + lastError);
+}
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_date.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..57a860ca1a8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/exportimport_date.js
@@ -0,0 +1,49 @@
+var tt = new ToolTest('exportimport_date_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Insert a date that we can format
+var formatable = ISODate("1970-01-02T05:00:00Z");
+assert.eq(formatable.valueOf(), 104400000);
+src.insert({ "_id" : formatable });
+
+// Insert a date that we cannot format as an ISODate string
+var nonformatable = ISODate("3001-01-01T00:00:00Z");
+assert.eq(nonformatable.valueOf(), 32535216000000);
+src.insert({ "_id" : nonformatable });
+
+// Verify number of documents inserted
+assert.eq(2, src.find().itcount());
+
+data = 'data/exportimport_date_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/files1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/files1.js
new file mode 100644
index 00000000000..acfcc16dcc3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/files1.js
@@ -0,0 +1,27 @@
+// files1.js
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/restorewithauth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..ac9e7bc756b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/restorewithauth.js
@@ -0,0 +1,113 @@
+/* SERVER-4972
+ * Test mongorestore against a server running with --auth: a restore without
+ * credentials should fail, and a restore with valid credentials should succeed.
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add an admin user so that authentication is enforced.
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 )
+
+// make sure the only indexes are the default _id indexes on foo.bar and foo.baz
+assert.eq(foo.system.indexes.count(), 2);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.system.indexes.count(), 3);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0);
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+// make sure that the collection is empty
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.system.indexes.count(), 3); // _id on bar, _id on baz, x on bar
+
+stopMongod( port );
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/stat1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/stat1.js
new file mode 100644
index 00000000000..539827e1704
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/stat1.js
@@ -0,0 +1,22 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+db.dropAllUsers();
+
+db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
+
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 1, "mongostat should exit with 1 with eliot:wrong");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
+// mongo tool tests, very basic to start with
+
+
+baseName = "jstests_tool_tool1";
+dbPath = MongoRunner.dataPath + baseName + "/";
+externalPath = MongoRunner.dataPath + baseName + "_external/";
+externalBaseName = "export.json";
+externalFile = externalPath + externalBaseName;
+
+function fileSize(){
+ var l = listFiles( externalPath );
+ for ( var i=0; i<l.length; i++ ){
+ if ( l[i].baseName == externalBaseName )
+ return l[i].size;
+ }
+ return -1;
+}
+
+
+port = allocatePorts( 1 )[ 0 ];
+resetDbpath( externalPath );
+
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
+c = m.getDB( baseName ).getCollection( baseName );
+c.save( { a: 1 } );
+assert( c.findOne() );
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
+c.drop();
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
+assert( c.findOne() , "mongodump then restore has no data" );
+assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
+
+resetDbpath( externalPath );
+
+assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
+assert.lt( 10 , fileSize() , "file size changed" );
+
+c.drop();
+runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
+assert.soon( "c.findOne()" , "mongo import json A" );
+assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool_replset.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..5e8aac672d6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tool_replset.js
@@ -0,0 +1,69 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
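+// i.e. "<setName>/<host1:port1>,<host2:port2>" -- the replica set connection
+// string format the tools use to discover members of the named set.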
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tsv1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..1b0ddbb7c9e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy26/jstests/tool/tsv1.js
@@ -0,0 +1,32 @@
+// tsv1.js
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.eq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( base , x , "tsv parse 2" )
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/buildlogger.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..f8c8212a890
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/buildlogger.py
@@ -0,0 +1,491 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from the credentials file
+# (BUILDLOGGER_CREDENTIALS, 'buildbot.tac' by default), which may be one,
+# two, or three directories up from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
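+
+# e.g. url('build') -> 'http://buildlogs.mongodb.org/build/' with the default
+# URL_ROOT above: the endpoint gains a trailing slash and URL_ROOT loses its own.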
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+    and returns None.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
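+# Illustrative use of LogAppender (a sketch; 'sink' stands for a hypothetical
+# callback with the same signature as append_global_logs, and 'some-build-id'
+# is a made-up id):
+#
+#   appender = LogAppender(callback=sink, args=('some-build-id',))
+#   for line in lines_from_subprocess:
+#       appender(line)    # buffers; auto-submits every 2000 lines or 10 seconds
+#   appender.submit()     # flush whatever is still buffered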
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+    while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/cleanbb.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
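+
+# e.g. shouldKill("mongod --dbpath /data/db/job0 --port 28000", root="/data/db/job0")
+# returns True (an illustrative command line), while any command line containing
+# "smoke.py", "emr.py", or "java" is always spared.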
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+            print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/__init__.py
new file mode 100644
index 00000000000..c3961685ab8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/__init__.py
@@ -0,0 +1 @@
+from __future__ import absolute_import
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/pipe.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/pipe.py
new file mode 100644
index 00000000000..bb080721b2d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/resmokelib/core/pipe.py
@@ -0,0 +1,87 @@
+"""
+Helper class to read output of a subprocess. Used to avoid deadlocks
+from the pipe buffer filling up and blocking the subprocess while it's
+being waited on.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class LoggerPipe(threading.Thread):
+ """
+ Asynchronously reads the output of a subprocess and sends it to a
+ logger.
+ """
+
+    # The start() and join() methods are not intended to be called directly on the LoggerPipe
+    # instance. Since we override them to enforce that, the superclass's versions are preserved here.
+ __start = threading.Thread.start
+ __join = threading.Thread.join
+
+ def __init__(self, logger, level, pipe_out):
+ """
+ Initializes the LoggerPipe with the specified logger, logging
+ level to use, and pipe to read from.
+ """
+
+ threading.Thread.__init__(self)
+ # Main thread should not call join() when exiting
+ self.daemon = True
+
+ self.__logger = logger
+ self.__level = level
+ self.__pipe_out = pipe_out
+
+ self.__lock = threading.Lock()
+ self.__condition = threading.Condition(self.__lock)
+
+ self.__started = False
+ self.__finished = False
+
+ LoggerPipe.__start(self)
+
+ def start(self):
+ raise NotImplementedError("start should not be called directly")
+
+ def run(self):
+ """
+ Reads the output from 'pipe_out' and logs each line to 'logger'.
+ """
+
+ with self.__lock:
+ self.__started = True
+ self.__condition.notify_all()
+
+ # Close the pipe when finished reading all of the output.
+ with self.__pipe_out:
+ # Avoid buffering the output from the pipe.
+ for line in iter(self.__pipe_out.readline, b""):
+ # Convert the output of the process from a bytestring to a UTF-8 string, and replace
+ # any characters that cannot be decoded with the official Unicode replacement
+ # character, U+FFFD. The log messages of MongoDB processes are not always valid
+ # UTF-8 sequences. See SERVER-7506.
+ line = line.decode("utf-8", "replace")
+ self.__logger.log(self.__level, line.rstrip())
+
+ with self.__lock:
+ self.__finished = True
+ self.__condition.notify_all()
+
+ def join(self, timeout=None):
+ raise NotImplementedError("join should not be called directly")
+
+ def wait_until_started(self):
+ with self.__lock:
+ while not self.__started:
+ self.__condition.wait()
+
+ def wait_until_finished(self):
+ with self.__lock:
+ while not self.__finished:
+ self.__condition.wait()
+
+ # No need to pass a timeout to join() because the thread should already be done after
+ # notifying us it has finished reading output from the pipe.
+ LoggerPipe.__join(self) # Tidy up the started thread.
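+
+# Illustrative usage (a sketch; 'proc' stands for a hypothetical subprocess.Popen
+# created with stdout=subprocess.PIPE, as smoke.py does for mongod):
+#
+#   logger_pipe = LoggerPipe(logger, logging.INFO, proc.stdout)
+#   logger_pipe.wait_until_started()
+#   proc.wait()
+#   logger_pipe.wait_until_finished()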
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/smoke.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/smoke.py
new file mode 100755
index 00000000000..5c9809c8936
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/smoke.py
@@ -0,0 +1,1481 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test"),
+# don't take arguments for the dbpath, but unconditionally use
+# "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+import logging
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
+
+from buildscripts.resmokelib.core import pipe
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
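+
+# (This lets both the stub and the real mongod class below act as context
+# managers: __exit__ ensures stop() runs when a "with" block exits.)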
+
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+ threads = threading.enumerate();
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
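+
+# e.g. with MONGO_USE_BUILDLOGGER=true, buildlogger(["mongod", "--port", "27999"])
+# (an illustrative command) returns [<python>, "buildscripts/buildlogger.py",
+# "mongod", "--port", "27999"], where <python> is whatever utils.find_python() finds.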
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ self.job_object = None
+ self._inner_proc_pid = None
+ self._stdout_pipe = None
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ if self.kwargs.get('wiredtiger_engine_config'):
+ argv += ["--wiredTigerEngineConfig", self.kwargs.get('wiredtiger_engine_config')]
+ if self.kwargs.get('wiredtiger_collection_config'):
+ argv += ["--wiredTigerCollectionConfig", self.kwargs.get('wiredtiger_collection_config')]
+ if self.kwargs.get('wiredtiger_index_config'):
+ argv += ["--wiredTigerIndexConfig", self.kwargs.get('wiredtiger_index_config')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'SCRAM-SHA-1')
+ if authMechanism != 'SCRAM-SHA-1':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+ argv += ['--clusterAuthMode','x509'];
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ # If the mongod process is spawned under buildlogger.py, then the first line of output
+ # should include the pid of the underlying mongod process. If smoke.py didn't create its own
+ # job object because it is already inside one, then the pid is used to attempt to terminate
+ # the underlying mongod process.
+ first_line = self.proc.stdout.readline()
+ match = re.search("^\[buildlogger.py\] pid: (?P<pid>[0-9]+)$", first_line.rstrip())
+ if match is not None:
+ self._inner_proc_pid = int(match.group("pid"))
+ else:
+ # The first line of output didn't include the pid of the underlying mongod process. We
+ # write the first line of output to smoke.py's stdout to ensure the message doesn't get
+ # lost since it's possible that buildlogger.py isn't being used.
+ sys.stdout.write(first_line)
+
+ logger = logging.Logger("", level=logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(fmt="%(message)s"))
+ logger.addHandler(handler)
+
+ self._stdout_pipe = pipe.LoggerPipe(logger, logging.INFO, self.proc.stdout)
+ self._stdout_pipe.wait_until_started()
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On Windows, this
+ method also assigns the started process to a job object if a new
+ one was created. This ensures that any child processes of this
+ process can be killed with a single call to TerminateJobObject
+ (see self.stop()).
+ """
+
+ creation_flags = 0
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+ import win32process
+
+ # Don't create a job object if the current process is already inside one.
+ if not win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ proc = Popen(argv, creationflags=creation_flags, stdout=PIPE, stderr=None, bufsize=0)
+
+ if self.job_object is not None:
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32" and self.job_object is not None:
+ # If smoke.py created its own job object, then we clean up the spawned processes by
+ # terminating it.
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif os.sys.platform == "win32":
+ # If smoke.py didn't create its own job object, then we attempt to clean up the
+ # spawned processes by terminating them individually.
+ import win32api
+ import win32con
+ import win32event
+ import win32process
+ import winerror
+
+ def win32_terminate(handle):
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python
+ # 2.7 because earlier versions do not catch exceptions.
+ try:
+ win32process.TerminateProcess(handle, -1)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has
+ # already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ # Terminate the mongod process underlying buildlogger.py if one exists.
+ if self._inner_proc_pid is not None:
+ # The PROCESS_TERMINATE privilege is necessary to call TerminateProcess() and
+ # the SYNCHRONIZE privilege is necessary to call WaitForSingleObject(). See
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx
+ # for more details.
+ required_access = win32con.PROCESS_TERMINATE | win32con.SYNCHRONIZE
+ inner_proc_handle = win32api.OpenProcess(required_access,
+ False,
+ self._inner_proc_pid)
+ try:
+ win32_terminate(inner_proc_handle)
+ win32event.WaitForSingleObject(inner_proc_handle, win32event.INFINITE)
+ finally:
+ win32api.CloseHandle(inner_proc_handle)
+
+ win32_terminate(self.proc._handle)
+ elif hasattr(self.proc, "terminate"):
+ # This method added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+
+ if self._stdout_pipe is not None:
+ self._stdout_pipe.wait_until_finished()
+
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ # Fail hard if mongod terminates with an error. That might indicate that an
+ # instrumented build (e.g. LSAN) has detected an error. For now we aren't doing this on
+ # windows because the exit code seems to be unpredictable. We don't have LSAN there
+ # anyway.
+ retcode = self.proc.returncode
+ if os.sys.platform != "win32" and retcode != 0:
+ raise(Exception('mongod process exited with non-zero code %d' % retcode))
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+ mOplog = mTestDB.connection.local["oplog.$main"];
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+            return True
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("sharding", "copydb_from_mongos.js"), # SERVER-13080
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
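+# For example, a test under jstests/multiVersion/ is pinned to
+# ["--writeMode", "legacy"], while every other path gets the globally
+# configured shell_write_mode (the --shell-write-mode option, which
+# defaults to "commands").
+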
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe"]:
+ argv = [path]
+ # default data directory for dbtest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+
+ if storage_engine:
+ argv.extend(["--storageEngine", storage_engine])
+ if wiredtiger_engine_config:
+ argv.extend(["--wiredTigerEngineConfig", wiredtiger_engine_config])
+ if wiredtiger_collection_config:
+ argv.extend(["--wiredTigerCollectionConfig", wiredtiger_collection_config])
+ if wiredtiger_index_config:
+ argv.extend(["--wiredTigerIndexConfig", wiredtiger_index_config])
+
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+    if (argv[0].endswith('mongo') or argv[0].endswith('mongo.exe')) and '--eval' not in argv:
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.storageEngine = "' + ternary( storage_engine, storage_engine, "" ) + '";' + \
+ 'TestData.wiredTigerEngineConfig = "' + ternary( wiredtiger_engine_config, wiredtiger_engine_config, "" ) + '";' + \
+ 'TestData.wiredTigerCollectionConfig = "' + ternary( wiredtiger_collection_config, wiredtiger_collection_config, "" ) + '";' + \
+ 'TestData.wiredTigerIndexConfig = "' + ternary( wiredtiger_index_config, wiredtiger_index_config, "" ) + '";' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+        if os.sys.platform == "win32":
+            # double the backslashes in the evalString on windows; this
+            # prevents them from being stripped when the shell (i.e. bash)
+            # evaluates this string. yuck.
+            evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ if os.getenv('SMOKE_EVAL') is not None:
+ evalString += os.getenv('SMOKE_EVAL')
+
+ argv = argv + [ '--eval', evalString]
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+            break
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+    is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+    if start_mongod and not is_mongod_still_up:
+        print "mongod is not running after test"
+        result["mongod_running_at_end"] = is_mongod_still_up
+        raise TestServerFailure(path)
+
+    result["mongod_running_at_end"] = is_mongod_still_up
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+    # We would like to use "with" here for its __exit__ semantics, but
+    # "with" is only supported on Python 2.5+, so use try/finally instead.
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+            primary = MongoClient(port=master.port)
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+                result = primary.admin.command("ismaster")
+ ismaster = result["ismaster"]
+ time.sleep(1)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+        tests_run += 1 # make the count 1-based; enumerate(..., start=1) needs Python 2.6
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "All docs matched!"
+ else:
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "mmap_v1": ("mmap_v1/*.js", True),
+ "gle": ("gle/*.js", True),
+ "rocksDB": ("rocksDB/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
+
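+# For example, invoking "smoke.py js" globs jstests/core/*.js and starts a
+# mongod before the run, while "smoke.py sharding" globs jstests/sharding/*.js
+# and leaves server startup to the tests themselves.
+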
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
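+# Sketch of the resulting mapping for a hypothetical module named "enterprise"
+# shipping an "audit" suite:
+#   get_module_suites()
+#   => {"audit": "src/mongo/db/modules/enterprise/jstests/audit/[!_]*.js"}
+# Passing that value to glob.glob() then enumerates every jstest in the suite
+# that does not start with an underscore.
+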
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
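+# For example (paths are illustrative), expand_suites(["jsCore"]) returns one
+# entry per matching file, such as ("<mongo_repo>/jstests/core/andor.js", True),
+# while expand_suites(["jsCore"], expandUseDB=False) collapses the suite to the
+# single entry ("jsCore", False).
+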
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine, wiredtiger_engine_config, wiredtiger_collection_config, wiredtiger_index_config
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+    if not os.path.exists(mongod_executable):
+        raise Exception("no mongod found at %s" % mongod_executable)
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+    if not os.path.exists(shell_executable):
+        raise Exception("no mongo shell found at %s" % shell_executable)
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ wiredtiger_engine_config = options.wiredtiger_engine_config
+ wiredtiger_collection_config = options.wiredtiger_collection_config
+ wiredtiger_index_config = options.wiredtiger_index_config
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine, wiredtiger_engine_config, wiredtiger_collection_config, wiredtiger_index_config
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+    # Some of our tests hard-code pathnames (e.g., of files to execute), so
+    # until that changes we don't have the freedom to run from anywhere.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--wiredTigerEngineConfig', dest='wiredtiger_engine_config', default=None,
+ help='Wired Tiger configuration to pass through to mongod')
+ parser.add_option('--wiredTigerCollectionConfig', dest='wiredtiger_collection_config', default=None,
+ help='Wired Tiger collection configuration to pass through to mongod')
+ parser.add_option('--wiredTigerIndexConfig', dest='wiredtiger_index_config', default=None,
+ help='Wired Tiger index configuration to pass through to mongod')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='SCRAM-SHA-1',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=(1 if 'detect_leaks=1' in os.getenv("ASAN_OPTIONS", "") else 20),
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+    parser.add_option('--set-parameters', dest='set_parameters', default="",
+                      help='Adds --setParameter to mongod for each item in the CSV list, e.g. "param1=1,param2=foo"')
+    parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+                      help='Adds --setParameter to mongos for each item in the CSV list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+    parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+                      help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default: %default)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+    if options.ignore_files is not None:
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/utils.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
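+# For example, on a checked-out branch .git/HEAD holds a line such as
+# "ref: refs/heads/v3.6", so getGitBranch() returns "v3.6"; on a detached
+# HEAD the file holds a bare commit hash, which is returned unchanged.
+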
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+    if b is None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+    if isinstance( args , str ):
+        r = re.compile( "\s+" )
+        args = r.split( args )
+    p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+    r = p.communicate()
+    return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+        if c is not None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+    d = os.path.dirname( name )
+    if not os.path.exists( d ):
+        print( "Creating dir: " + d )
+        os.makedirs( d )
+    if not os.path.exists( d ):
+        raise Exception( "Failed to create dir: " + d )
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
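+# For example (paths are illustrative), smoke_command("--auth", "jsCore")
+# might return:
+#   ['/usr/bin/python', '/repo/buildscripts/smoke.py', '--with-cleanbb',
+#    '--auth', 'jsCore']
+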
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
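+
+# Rough illustration (assuming Python 2 semantics): valid UTF-8 bytes decode
+# normally while a stray byte is replaced by its repr, e.g.
+#   unicode_dammit('caf\xc3\xa9 \xff') == u'caf\xe9 \\xff'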
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/analyze_plan.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..9c2ebffd890
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/analyze_plan.js
@@ -0,0 +1,80 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ }
+ else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ }
+ else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
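+// Example usage (collection and index are hypothetical; explain() output
+// shape follows the 3.0+ query planner):
+//   var plan = db.c.find({a: 1}).explain().queryPlanner.winningPlan;
+//   planHasStage(plan, "IXSCAN");  // true when an index scan appears anywhere
+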
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
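+// For example, with a hypothetical index on {a: 1}, a query that projects
+// only indexed fields, such as find({a: 1}, {_id: 0, a: 1}), can be answered
+// without a FETCH stage, so isIndexOnly() returns true for its winningPlan.
+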
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ }
+ else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ }
+ else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+            skips += getChunkSkips(root.inputStages[i]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/authTestsKey b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/badSAN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/badSAN.pem
new file mode 100644
index 00000000000..d8e362731e0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/badSAN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgIDAYKXMA0GCSqGSIb3DQEBBQUAMHQxFzAVBgNVBAMTDktl
+cm5lbCBUZXN0IENBMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzAeFw0xNDA5MjMxNTE3MjNaFw0zNDA5MjMxNTE3MjNaMG8xEjAQBgNV
+BAMTCTEyNy4wLjAuMTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCDB/lxuzeU
+OHR5nnOTJM0fHz0WeicnuUfGG5wP89Mbkd3Y+BNS0ozbnkW+NAGhD+ehNBjogISZ
+jLCd+uaYu7TLWpkgki+1+gM99Ro0vv7dIc8vD7ToILKMbM8xQmLbSxDT2tCUoXlc
+m7ccgDZl9oW1scQYQ8gWHjmk3yK8sCoGa/uwr49u74aVM7673tLsK41m8oYPzt/q
+VGT+mXpBJQcGXkTNQtIPxBtD25jr+aPietS3u70zrVPY6ZDsGE7DofEeRl97kVoF
+NcpaQmVEwEo8KCWaT6OaPaUUUjAMwzqiZaHNZ6mL1pCr65bLXP6T9tiMtWLw5+SG
+3E09fhQuWod5AgMBAAGjFTATMBEGA1UdEQQKMAiCBmJhZFNBTjANBgkqhkiG9w0B
+AQUFAAOCAQEAQzlibJvlUpJG3vc5JppdrudpXoVAP3wtpzvnkrY0GTWIUE52mCIf
+MJ5sARvjzs/uMhV5GLnjqTcT+DFkihqKyFo1tKBD7LSuSjfDvjmggG9lq0/xDvVU
+uczAuNtI1T7N+6P7LyTG4HqniYouPMDWyCKBOmzzNsk+r1OJb6cxU7QQwmSWw1n1
+ztNcF6JzCQVcd9Isau9AEXZ9q0M0sjD9mL67Qo3Dh3Mvf4UkJKqm3KOQOupUHZLU
+vJwfsS2u+gfHY1Plywzq3AuT7ygbksR3Pqfs8LFPnuRAH+41sFTGUM52hiU7mNPj
+ebl8s1tjK7WQ+a8GTABJV0hDNeWd3Sr+Og==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAgwf5cbs3lDh0eZ5zkyTNHx89FnonJ7lHxhucD/PTG5Hd2PgT
+UtKM255FvjQBoQ/noTQY6ICEmYywnfrmmLu0y1qZIJIvtfoDPfUaNL7+3SHPLw+0
+6CCyjGzPMUJi20sQ09rQlKF5XJu3HIA2ZfaFtbHEGEPIFh45pN8ivLAqBmv7sK+P
+bu+GlTO+u97S7CuNZvKGD87f6lRk/pl6QSUHBl5EzULSD8QbQ9uY6/mj4nrUt7u9
+M61T2OmQ7BhOw6HxHkZfe5FaBTXKWkJlRMBKPCglmk+jmj2lFFIwDMM6omWhzWep
+i9aQq+uWy1z+k/bYjLVi8OfkhtxNPX4ULlqHeQIDAQABAoIBAC4Bx8jyJmKpq+Pk
+CcqZelg6HLXesA7XlGbv3M0RHIeqoM2E1SwYd5LJMM3G7ueBcR/97dz8+xH6/yyJ
+Ixxvk9xu9CMmkRABN9AyVkA867nzHA73Idr7WBXMQreWCqXa5o6sXt5BEB6/If0k
+23TTqUERqLuoWQHDHRRRsJ218RuNmbvBe8TGXcfunC0eeDVKDeqAXol6bD5lztdu
+B6jkdLt5UZSQ7X8OmClbeDlac90B8usNi+pUE9q1p7X462vAw8LohkxLY2nyIcmU
+feNdTNHP+lklv+E+p9w/Az7Hf6zxm525tw90QVI048fr9SL3ftLHOt4FhucSCn0Z
+CjylP4ECgYEA+nQrNVdVwmxcWCVn69LR1grNXUSz+fLHCo+QKma4IyC1kuuZ+BBo
+Iwdf9t/S1tgtTYru3uxzCpQg7J1iDeEFEsMHl0rc6U1MmIE+6OvACVG3yotqoOqE
+852pi1OWIe94yTk2ZmNXJ8gpUE/gtMprbcSWOb7IzzrXy2lDcaEMuGkCgYEAhe7L
+ZvYI4LEvu6GSPp97qBzDH9m5UrHaTZIJk/Nu7ie919Sdg62LTfphsaK+pSyA55XQ
+8L9P7wNUPC44NnE+7CIJZsIuKdYqR5QI6No9RdTyij0Hgljfc7KuH2b8lf8EjvuH
+qZAf5zL3pIOQs8E8/MYHlGIqmTkYK41eCAcS9JECgYEADnra6KmU9rmnGR2IhZTZ
+tuNG/kZzlVbY9R5ZumnX6YgBl23xp+ri6muJu88y9GLpM5t9tfu7pvfrc2KiAaVp
+0qzd6nxUi1SBwituxK6kmqVT1+z5jDYi26bY34pEms+qjw+0unSx3EXxRYhouGsf
+jOgZu1rxZzHCuirq0E38W0kCgYBzOK16RX37t9OFywlioJekWCIxu4BouSNCirl8
+s/eiIUR8cqiUCPAIRLhZNtZmiTPYiBW5mAyvZiDIqUao56InSVznL3TBf0LeU2ea
+023VLs79yGU2aTjLc1PDJjl03XDRhWj/okMgBsPvn1QUoNDT8ZXBvPZC3VCC31qe
+818GUQKBgQDBUP2BC/Th/0dErOQ5lWkY3YbmzrTp2pDsHGZJRD+OdQ5B8FUvCP8m
+JESk/0ATn7niUqawnOy/2KlKIkeBBV2XL1rjIGEhCkBUuhCiInNDqz1AGdXzIKaT
+myoZ4PhIsH1D643e6iLhyAZuUAA4yB31E2a3l7EMyhV3vKbdWWygGQ==
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/ca.pem
new file mode 100644
index 00000000000..eedfb473d4b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/ca.pem
@@ -0,0 +1,102 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml ca.pem
+#
+# Primary Root Certificate Authority Most Certificates are issued by this CA.
+-----BEGIN CERTIFICATE-----
+MIIDdDCCAlwCBBmRIxIwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjczOVoXDTM5MDkyNzIzMjczOVowdDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5l
+bCBUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAupVkx8+n
+AqzsANKwNPeCYlf2q0WgF4kSUMNJdpmMelrr7hh7EOnAU0hTAQx9BKTEbExeCzH6
+OArFNGjewjWVXwaOpCjK8FMvK6/lGVEpmoHNF9XuiQVmaQ4bJD6rC73YjpgNIPeL
+5PyoFLEZv+X2cRBPpTcSRcf87tk8HL7v0eyk1JBhkeKK68SYdWwZlHaa1jqwmliW
+WvVMkHVH3lx0VOgQwWtOgs0K1zpcZ0sH5MGpYRQOiidIRZj3PkKeTPQe2D6VQQtv
+2yDs9dWfCxJJP9QiWclL2rF/xqlFSNEIfNZpZhk6I1DHQpA2uyJfzRH62pFasJuB
+CVh5Tr0EDoVreQIDAQABoxMwETAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
+CwUAA4IBAQARdNCYYWxi2fyhJwzGHwIT261d/pTlOSYLlm84c72aEneFUnfp8/H5
+JjuFbnhiX+5+h3M7eDQhra9s+H3vKr7o38EIVf5OKXvpNLwv1UUmomBvKqccioYh
+bxrfwCzfBRuUmW05kcAVn8iKovqyxL7npEZbckwtT+BqZ4kOL4Uzre+S1HMx0zOu
+xulSYA/sBoJ2BB93ZIAqB+f/+InS9yggzyhhaQqS7QEl1L4nZE4Oy0jKcxdCzysm
+TqiyH+OI5SVRTfXh4XvHmdWBBaQyaTmQzXYUxUi7jg1jEAiebCGrEJv9plwq4KfC
+cze9NLBjaXR3GzonT8kICyVT/0UvhuJg
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6lWTHz6cCrOwA
+0rA094JiV/arRaAXiRJQw0l2mYx6WuvuGHsQ6cBTSFMBDH0EpMRsTF4LMfo4CsU0
+aN7CNZVfBo6kKMrwUy8rr+UZUSmagc0X1e6JBWZpDhskPqsLvdiOmA0g94vk/KgU
+sRm/5fZxEE+lNxJFx/zu2Twcvu/R7KTUkGGR4orrxJh1bBmUdprWOrCaWJZa9UyQ
+dUfeXHRU6BDBa06CzQrXOlxnSwfkwalhFA6KJ0hFmPc+Qp5M9B7YPpVBC2/bIOz1
+1Z8LEkk/1CJZyUvasX/GqUVI0Qh81mlmGTojUMdCkDa7Il/NEfrakVqwm4EJWHlO
+vQQOhWt5AgMBAAECggEATMiSEtBXsHgtHyGVNhbtZz8/2bfrbAQSr33OS6pg7zFf
+ijo02v73EM7gqbvT7wdHdjHvObg1er6j7S+DDHU0pUhzhwbqjQAOl3HpXAPUlSr5
+leeKJIU+YdlWPhI0Hb7g0b2tGXYF8hQvr2Q6bohaZKkLvbfuIx1r73cij8nbs411
+MU892GBVZRcMGITLT7W1BFWYGmjqIH7FabSpFpIxeEX2ONooHHOYBlj3dJs8WdGr
+/gGJSYq1YGUbVQLIn5m9JmYuFShEhcSrGVAdKto2qRqUpTaePXLU8dozInejFMVb
+yul5fwPuUGgDz+x6wKWRSA8138uaEHQl7r5DqOv6oQKBgQDkYg37TdSC0cCWxL/J
+vFzMPQ/p9iT8ZgOA1lvUUkpwhGOdAqAiR35vZPwFtu3sSl/BUER7ZDf+ILfdPzW5
+wFiWWAkrS8jWl9RaFwkjeTq5fwv3kJGwPwVvCzjLjX7tUDCJv8azOuIxoindCnnC
+y5HXm6hOQZS26lZqNDzsBFzWdQKBgQDRJV9+7gRyZhl8PIBO91Hl5C+wTBFjFFrH
+TJRnT0pNX5FRkPZoyPgJD44S0zFg4oWKl1r2YvfwCQZtQl3h2ZDDGnEE7mJ0++hi
++UUfLyF9dXq27aK4mJsDkdp5Hi6vqfBETPPPyHffwY/UFLOsseqU+5aVG/7Hk6Th
+r2jQzNzIdQKBgQCx1SRb9YuvXdKf044fcNu1cSquHCtKmcjKjp+soXMzT/Mc9Elq
+x87MwI55iKqU3ojXR0A78Jqk+CcTUT/tZrfUQbLKEtXNOyZwDBXimGhAvou79sdq
+vHfnso5D+ebGtTVGXZ1EPGqbCVGdu6V6J/dlMuCIJwq8r5YgVpLFmNQNbQKBgQCD
+PNRjra+UqYSsdLp+0qrNexaBpfnzFj6weNlWymZxjtp9OF2m+xTJer1ICsYdaYcq
+pDcsDIZQVRl3TgHjSsaP5vOsiHm3tqroEErTepulQiayywMkmn4LC2bwQYRCLnza
+Hv+PDthJzAgYqLTmVO5CdmzTPDHvwjHgfFVlUGfqUQKBgE16f0Euzx3qySIJmMrR
+dNI/y8FSSLgoxWPX7WQIpnXhbNeqqlwGNOi8beSkiRsaL6zJcR3tTPkOfsSeIiCf
+yG73jB3F1L8A5dX2YrKOJOzSxrByzVDnfrukCuxkcW0N31OJ0sFiq9Kjcb/9zyiI
+BQTIxkN91Squn4Y+I3ikyoc3
+-----END PRIVATE KEY-----
+# Certificate from ca-2019.pem
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client.pem
new file mode 100644
index 00000000000..54e73f889fc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client.pem
@@ -0,0 +1,54 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml client.pem
+#
+# General purpose client certificate.
+-----BEGIN CERTIFICATE-----
+MIIDqzCCApMCBAlumm0wDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjc0MFoXDTM5MDkyNzIzMjc0MFowcDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYDVQQDDAZj
+bGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChwxZmhXIUlErr
+Na+gpnBjMigZPXakQLvpWZ02PBbXCfwLAPfEw5B2QH7Y01pxnFiGKY60CiMr1lpA
+IcY60Po4MjzJEVOn1xaVrcaXrD8nCxk8WgndRsd6L7O36etA+zlrndTZLyB1RZza
+xrcuKJ8fELAEFfDdrZOJz1IZ82S81N3U8CX4t8HSKtMBhRTskGxDFGGKrpm9i4ly
+WPcKLxHW5N0C7gBpyFsB6cOs87VFOtL2/iQBaOsF5aRuvNMvuyxtwXy8prJXzh7d
+8N5WaXa8qnR0WW5Kwgs+Snuzi6LD88hGRUv99ZPNFrRYfUPR9nerc+DcsBXklUAD
+1CgKXq+/AgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM
+MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQnVPhN4rWyrA1qxruucxPplu6V5jANBgkq
+hkiG9w0BAQsFAAOCAQEAH4pD8iA1BgpLEIfKYzBA0Qv/ZgCRNtvMz4bdY0rIUjAJ
+0QGOr4Bj0RFj3CJgrhgdXyb07ZcpzHMHzMjwQxPV+1YlxzUJeH6VMflMuemh0oL6
+QZ3YO7eUs174vnAG1ofQ41g5lOJoI3d8dVM6jeYQiJ4W0WHRXUMwJ9EasRWmcx+0
+McPZlJx/ScJRghGrVpKfdxwlq2GOmC5ALW3zFDBkZGanVxSSFlyxfczBms9ZmqTv
+wk+Jt4yoGSnK3eEDR37BBiKGMTUjIodjcOaPUxCsUOITfa6/cBb83Ot/XKtSLAwd
+J/7CxGmyBzwkTSoxgrVVDBriClB7P3rsRDrcvTkVgQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQChwxZmhXIUlErr
+Na+gpnBjMigZPXakQLvpWZ02PBbXCfwLAPfEw5B2QH7Y01pxnFiGKY60CiMr1lpA
+IcY60Po4MjzJEVOn1xaVrcaXrD8nCxk8WgndRsd6L7O36etA+zlrndTZLyB1RZza
+xrcuKJ8fELAEFfDdrZOJz1IZ82S81N3U8CX4t8HSKtMBhRTskGxDFGGKrpm9i4ly
+WPcKLxHW5N0C7gBpyFsB6cOs87VFOtL2/iQBaOsF5aRuvNMvuyxtwXy8prJXzh7d
+8N5WaXa8qnR0WW5Kwgs+Snuzi6LD88hGRUv99ZPNFrRYfUPR9nerc+DcsBXklUAD
+1CgKXq+/AgMBAAECggEBAJNaNeaP+tp6N0DJd8G8u7Xws4xCa/e1rDcl1crYs+mR
+A9I2QH0xtD+0Apia9tF1HUKhSogAkAdXzICNA5kCUlysjqiOKwlCKiWQ1c3GLwTu
+3D8YudGirwVDvM90u0NHVggNDx4ECuoozniP+b+Ha2ON/PjLz8zvV+16OIzBJWvI
+fUkeuyHWsYrh5wNdjW9pT0+N85q9pzuGGthoshR4d5z2bKhm9MKA8mz1p3DMS7a3
+F2AiJPWkvzUksZ/h/WXOk19DhmG7lHkNEzfm/RsUjfswx0Eoz3gmNkH7oS3yYNJD
+yDHt6553zaP0UA+mJzaHqaw+JN1Vxdva+IYNLs+ekLkCgYEA1kjar3m7RiGXu8ZL
+lew3Hhwp0W4PjhfohdLAhyLAbL+8fQl7Blz21wz9/E77JpFQUmpfWhD9V39gvpk9
+X3/dhAX2dbZFphWEPNReF5oWDDiQzZLnfv3UqKQ3/Q58hKhujINIIvPbqyhDz4AW
+zhLKN/JLmNo5mquWOGRvvAcoweMCgYEAwUC0xrZqf8G6y6JGk24u3coY9dmkohNZ
+L/GfKK059b/Px0k05WaIXRxKWlRhIHQjhqVoctjd061WDgHoa8jkS5ARbqoxJeZI
+izx11MDfbdykv/rIm+mpXl1B/WjQ+oV8wg5J8Sz5o8bXlcr7SlWoj/bOFDgfUsG2
+cTBdLx8KEXUCgYEAw8CILfcZkmaLs1jhodLJQzNdLFnL0fWbT+0IPisL+AxsBxmL
+DnQMdsYCm+QW4NOsua088A1HMhBF87gPbddkKAA+dqgNFjzfsaYbUppLe9yMw49o
+9QSyqWBjWURLkfBAFRK06mE1EjVVRRBxRM27d8JbNwZbyyTmtRtjSzSLEhsCgYEA
+qMTrm0afh2cWzNOxMvvcrhDa74nc/zZKg1ZOGuX9YiLbQD0sltKOcFJOlMhv8jm6
+9NHrf7DpOP1908nSwp600VDGCJFVTrXn0MFCXkT6WyWAM7C+FXtgUGq9QKOTLdX5
++DcmWBthAKicidK01lwsU13E51+D63AE9qC4IHjJACkCgYBlUwyanKuhhmKMJo6b
+PBhpUAruP9LRIzoYYlGZcZhaJXKyfrzc0pMM0Xmxk0avG6yfvttu4jndeK/dPjwY
+Fbp6+x+iWUykgPbDMMij1WlC7p/RijenN+vQlHzhB+Zyl8jMjpANHnF2PunaaNa2
+K0ZXGPQcmZrvdOPG7+q9wMMVxg==
+-----END PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..03db67deb50
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/client_revoked.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBAjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB4MRcwFQYDVQQD
+Ew5jbGllbnRfcmV2b2tlZDETMBEGA1UECxMKS2VybmVsVXNlcjEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+lJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSKyZMGCcqlYVQmqT/J
+Fnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505HaWv7b+M3qksRHDLpw
+/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/6vcUkg/aU/50MRUN
+qGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4lQjrCpR36fkr5a+vI
+UbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNFvGDOCNBKZK5ZxLZ3
+gGFcR6kL6u11y4zoLrZ6xwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQB8WQMn/cjh
+9qFtr7JL4VPIz/+96QaBmkHxMqiYL/iMg5Vko3GllLc1mgfWQfaWOvyRJClKj395
+595L2u8wBKon3DXUPAkinc6+VOwDWsxFLNtWl+jhigat5UDzGm8ZKFhl0WwNhqzZ
+dlNPrh2LJZzPFfimfGyVkhPHYYdELvn+bnEMT8ae1jw2yQEeVFzHe7ZdlV5nMOE7
+Gx6ZZhYlS+jgpIxez5aiKqit/0azq5GGkpCv2H8/EXxkR4gLZGYnIqGuZP3r34NY
+Lkh5J3Qnpyhdopa/34yOCa8mY1wW7vEro0fb/Dh21bpyEOz6tBk3C1QRaGD+XQOM
+cedxtUjYmWqn
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSK
+yZMGCcqlYVQmqT/JFnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505Ha
+Wv7b+M3qksRHDLpw/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/
+6vcUkg/aU/50MRUNqGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4l
+QjrCpR36fkr5a+vIUbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNF
+vGDOCNBKZK5ZxLZ3gGFcR6kL6u11y4zoLrZ6xwIDAQABAoIBAFlu0T3q18Iu0VlR
+n5GEYMgvSuM4NAVVKo8wwwYMwu1xuvXb+NMLyuyFqzaCQKpHmywOOnfhCC/KkxX8
+Ho87kTbTDKhuXZyOHx0cA1zKCDSlGdK8yt9M1vJMa0pdGi2M34b+uOQ35IVsOocH
+4KWayIH7g52V2xZ2bpOSSnpm0uCPZSBTgClCgTUYepOT2wbLn/8V0NtVpZhDsBqg
+fORuEHkiurrbLa8yjQsvbR+hsR/XbGhre8sTQapj4EITXvkEuOL/vwbRebhOFHgh
+8sipsXZ9CMaJkBpVoLZTxTKQID/9006cczJK2MGKFhn6mvP6AeFuJAM3xqLGZTc4
+xxpfJyECgYEA0+iKxy5r1WUpBHR8jTh7WjLc6r5MFJQlGgLPjdQW6gCIe/PZc+b9
+x5vDp27EQ1cAEePEu0glQ/yk19yfxbxrqHsRjRrgwoiYTXjGI5zZSjXKArHyEgBj
+XOyo5leO5XMFnk2AShPlh+/RhAW3NhxcWkBEAsCD6QyC3BPvP6aaAXkCgYEAs4WH
+dTuweTdnyquHQm59ijatvBeP8h4tBozSupflQjB9WxJeW5uEa8lNQ3lSz1F4TV3M
+xvGdDSqwftLRS2mWGho/1jaCeAzjsiUQ2WUHChxprt0+QU7XkJbaBY9eF+6THZFw
+sDG688TiolxqoD8OYi8EtxmIvbQhXHmXnrk3jj8CgYBSi74rkrisuqg8tQejl0Ht
+w+xsgM5wIblGJZwmOlzmsGh6KGYnkO6Ap/uSKELJnIVJcrk63wKtNigccjPGufwR
++EbA+ZxeCwmQ/B/q1XmLP+K+JAUQ4BfUpdexSqA+XwzsOnJj6NY7mr65t+RDbs7G
+1Uvo6oc37Ai5pAZJfCN3uQKBgQAJr5qvaJkM8UBYXwjdPLjpTCnzjBHoLlifkdmM
+18U23QbmcwdESg/LAQF6MoGVTf//rJ/v2/ltTHBZZ2aDex7uKZxoImjHsWpXokhW
+cmz+zqmlFarWOzrGQl1hD2s0P1sQrVg3KXe8z1KrD/Fw0/Yitga7GlWWZrGmG6li
+lvu4YQKBgQANODQYEaz739IoPNnMfTpTqAoQIOR4PNdMfCXSQrCB8i0Hh4z48E4F
+DEAd1xIYyxI8pu7r52dQlBk7yrILOTG0gmgLJd5xKdtCTrasYAICI3hsRLtP8dVA
+8WeykXY4Wf1bYQ+VzKVImkwL/SBm2ik5woyxCzT8JSjyoAwRrQp9Vw==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/cluster_cert.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/cluster_cert.pem
new file mode 100644
index 00000000000..a8623ab67ef
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/cluster_cert.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkagAwIBAgIBBDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBxMRQwEgYDVQQD
+EwtjbHVzdGVydGVzdDEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCX42ZTwADG
+sEkS7ijfADlDQaJpbdgrnQKa5ssMQK3oRGSqXfTp0ThsJiVBbYZ8ZZRpPMgJdowa
+pFCGHQJh6VOdKelR0f/uNVpBGVz1yD4E4AtkA6UYcIJq6ywcj+W7Pli1Ed8VUN3Q
+tBU+HvHiEdMj74kLJb4ID1cP3gehvRv/0szkN8/ODFKCgYb1619BdFb9gRn8eily
+Wcg1m1gXz2xSfqRZkFEcEYet3BeOEGZBhaufJFzinvQjocH+kWFKlZf0+2DEFFbH
+NRqmabMmqMBUke629EUn8a7PBWBYNLld9afoNHwNY68wpONf5IqR2mNar5bVz8/d
+4g7BuVNvEFdJAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAA3U2O+cE/ZS8SDBw/sr
+BVFf0uaoME7+XX2jdTi4RUpWPfQ6uTkhKnXKzTzGrQtKwA96slGp4c3mxGBaAbC5
+IuTS97mLCju9NFvJVtazIajO4eNlG6dJSk0pQzjc0RAeLYksX/9NRNKZ+lQ5QVS2
+NVLce70QZBIvujjVJZ5hqDdjPV0JGOOUzNGyyUhzgY7s9MQagNnBSu5HO4CK1onc
+goOkizulq/5WF+JtqW8VKKx+/CH6SnTkS4b3qbjgKRmHZcOshH/d4KqhoLya7sfH
+pedmm7WgO9p8umXXqNj+04ehuPKTnD8tLMhj+GbJ9eIChPCBf1XnIzOXYep+fq9j
+n/g=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAl+NmU8AAxrBJEu4o3wA5Q0GiaW3YK50CmubLDECt6ERkql30
+6dE4bCYlQW2GfGWUaTzICXaMGqRQhh0CYelTnSnpUdH/7jVaQRlc9cg+BOALZAOl
+GHCCaussHI/luz5YtRHfFVDd0LQVPh7x4hHTI++JCyW+CA9XD94Hob0b/9LM5DfP
+zgxSgoGG9etfQXRW/YEZ/HopclnINZtYF89sUn6kWZBRHBGHrdwXjhBmQYWrnyRc
+4p70I6HB/pFhSpWX9PtgxBRWxzUapmmzJqjAVJHutvRFJ/GuzwVgWDS5XfWn6DR8
+DWOvMKTjX+SKkdpjWq+W1c/P3eIOwblTbxBXSQIDAQABAoIBAHhjNFMDZ1oUlgbc
+ICcI/VoyprFb8DA5ZbwzXBMdHPpxYzyp9hpxy3/lCDiAwEzPEsAK/h6KCOiA/pYe
+XioPD0gN1TIV+f3r6dqZaNYi3g1tK3odbXkejDCEsFT/NT6hXxw9yw0RKI9ofUHc
+synVqP3duUjNpH6s8fvQp0nqI0wzoNm1kklpTWVjZmbtSZF9m/xfv7NGwQEYUL2V
+f5YvX6aHPVDtUXAqyPBgv6SGuogSSjwRTsNTef3aY6Se5MlP3YIfRqdad8+ORkKu
+WSrO+GjQccV4sztD8Sn3LR7qe6Lmid4yopHSS4EFq0Sc8LznTeflWcRAsBLezRp5
+xZB/blECgYEA8yrEzFA247AOXbhL1CdqMyPs523oy5+dmByyovjYjEhjUCRlAa9D
+ApvID4TfAkA4n0rUdICCtwbZlFrBZbn6rXNvJ362ufZjvaFIucQm90YkG1J6Ldek
+8ohJfLyyLLWzVHJIS7WxFqqsGmDhYUTErFbJZjI8tNSglrc81jUWT7UCgYEAn+dw
+ICyc09f6+xm3nFZIOq2Gtpw8lrOJlwZugn1AqY2D5Ko2gq1Fx2oZWpVaBivjH3gU
+ONlnPuealE0RJHvCm/+axy7Rcj65IwTrN5V+j6rg1tuEdi70PvNKmN6XQqRvEjOX
+HOh3gQYP6EFAoVINZZqUkwJzqpv4tnOSpEHXncUCgYB3+Z8Vq3IZjtDXvslzCGtm
+hhAp81mLtdocpfQhYqP9Ou39KafIV/+49sGTnpwlUShet53xSUK1KSULBGgtV8Bt
++ela1DM1t3Joqn3mYfhTwoCoFl5/5cjVfRa8+6DxXEj5nlU7PY79PwIhFbG9ux9K
+ZJuD17+J/Oqq0gerLJAwjQKBgAS4AbkRV/dwcjmiwqZcbXk90bHl3mvcFH1edTho
+ldXrFS9UTpOApYSC/wiLS8LO3L76/i3HTKKwlwE1XQIknNOZsWmbWhby/uenp4FW
+agu3UTdF9xy9uft5loP4XaJb0+NHnnf97DjkgueptUyNbVPIQgYsllk8jRRlSLiM
+MN65AoGAUPLlh8ok/iNirO5YKqc5/3FKA1o1V1KSTHYVUK+Y+vuVJxQZeO3LMybe
+7AJ1cLHEWc8V4B27e6g33rfGGAW+/+RJ7/uHxuYCuKhstbq/x+rf9i4nl93emlMV
+PC3yuZsCmpk9Uypzi2+PT10yVgXkXRYtLpuUpoABWRzVXGnEsXo=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..f194b73ce7f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,214 @@
+// Merge the two options objects. Used as a helper when comparing options, since the test
+// framework adds extra fields of its own. Anything set in the second options object overrides
+// the first. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
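+
+// A minimal sketch of the merge behavior (the values here are hypothetical):
+//   mergeOptions({ net: { port: 1 }, storage: { dbPath: "/a" } },
+//                { net: { port: 2 } })
+//   returns { net: { port: 2 }, storage: { dbPath: "/a" } }: obj2 wins for leaves
+//   present in both objects, and keys present in only one side are carried over.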
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+    // Create and authenticate a high-privilege user in case mongod is running with
+    // authorization. The try/catch is necessary in case this is run against an uninitiated
+    // replica set, e.g. by a test such as repl_options.js.
+ var ex;
+ try {
+ mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
+ mongod.getDB("admin").auth("root", "pass");
+ }
+ catch (ex) {
+ }
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ mongod.getDB("admin").logout();
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
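+
+// A hypothetical invocation tying these helpers to the config files added below; the
+// expected mapping of auth=false to security.authorization is illustrative:
+//   testGetCmdLineOptsMongod({ config: "jstests/libs/config_files/disable_auth.ini" },
+//                            { parsed: { config: "jstests/libs/config_files/disable_auth.ini",
+//                                        security: { authorization: "disabled" } } });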
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_auth.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_auth.ini
new file mode 100644
index 00000000000..c1193be1b03
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_auth.ini
@@ -0,0 +1 @@
+auth=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_dur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_dur.ini
new file mode 100644
index 00000000000..8f83f3ae5a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_dur.ini
@@ -0,0 +1 @@
+dur=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_httpinterface.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_httpinterface.ini
new file mode 100644
index 00000000000..fc839a98a76
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_httpinterface.ini
@@ -0,0 +1 @@
+httpinterface=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_ipv6.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_ipv6.ini
new file mode 100644
index 00000000000..a091421022d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_ipv6.ini
@@ -0,0 +1 @@
+ipv6=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_journal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_journal.ini
new file mode 100644
index 00000000000..d0010a86906
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_journal.ini
@@ -0,0 +1 @@
+journal=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_jsonp.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_jsonp.ini
new file mode 100644
index 00000000000..82847f50b2b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_jsonp.ini
@@ -0,0 +1 @@
+jsonp=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_moveparanoia.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_moveparanoia.ini
new file mode 100644
index 00000000000..f21b50f9513
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_moveparanoia.ini
@@ -0,0 +1 @@
+moveParanoia=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noauth.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noauth.ini
new file mode 100644
index 00000000000..a65f909baf3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noauth.ini
@@ -0,0 +1 @@
+noauth=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noautosplit.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noautosplit.ini
new file mode 100644
index 00000000000..b490f9038dd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noautosplit.ini
@@ -0,0 +1 @@
+noAutoSplit=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nodur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nodur.ini
new file mode 100644
index 00000000000..b0c73a48b30
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nodur.ini
@@ -0,0 +1 @@
+nodur=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nohttpinterface.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nohttpinterface.ini
new file mode 100644
index 00000000000..52c4958da6e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nohttpinterface.ini
@@ -0,0 +1 @@
+nohttpinterface=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noindexbuildretry.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noindexbuildretry.ini
new file mode 100644
index 00000000000..79e428c492f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noindexbuildretry.ini
@@ -0,0 +1 @@
+noIndexBuildRetry=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nojournal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nojournal.ini
new file mode 100644
index 00000000000..17172363d25
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nomoveparanoia.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nomoveparanoia.ini
new file mode 100644
index 00000000000..4696304134f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nomoveparanoia.ini
@@ -0,0 +1 @@
+noMoveParanoia=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noobjcheck.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noobjcheck.ini
new file mode 100644
index 00000000000..471e83c3172
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noobjcheck.ini
@@ -0,0 +1 @@
+noobjcheck=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noprealloc.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noprealloc.ini
new file mode 100644
index 00000000000..08c78be3507
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noprealloc.ini
@@ -0,0 +1 @@
+noprealloc=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nounixsocket.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nounixsocket.ini
new file mode 100644
index 00000000000..66da9f08391
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_nounixsocket.ini
@@ -0,0 +1 @@
+nounixsocket=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_objcheck.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_objcheck.ini
new file mode 100644
index 00000000000..bd19d026bbf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/disable_objcheck.ini
@@ -0,0 +1 @@
+objcheck=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_dur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_dur.ini
new file mode 100644
index 00000000000..43495fbd0bd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_dur.ini
@@ -0,0 +1 @@
+dur=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_journal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_journal.ini
new file mode 100644
index 00000000000..f750ac2e185
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_journal.ini
@@ -0,0 +1 @@
+journal=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nodur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nodur.ini
new file mode 100644
index 00000000000..f1046df16a9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nodur.ini
@@ -0,0 +1 @@
+nodur=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nojournal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nojournal.ini
new file mode 100644
index 00000000000..737e5c28029
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/config_files/implicitly_enable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl.pem
new file mode 100644
index 00000000000..275c9e2d91c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:56:28 2014 GMT
+ Next Update: Aug 18 13:56:28 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 48:1b:0b:b1:89:f5:6f:af:3c:dd:2a:a0:e5:55:04:80:16:b4:
+ 23:98:39:bb:9f:16:c9:25:73:72:c6:a6:73:21:1d:1a:b6:99:
+ fc:47:5e:bc:af:64:29:02:9c:a5:db:15:8a:65:48:3c:4f:a6:
+ cd:35:47:aa:c6:c0:39:f5:a6:88:8f:1b:6c:26:61:4e:10:d7:
+ e2:b0:20:3a:64:92:c1:d3:2a:11:3e:03:e2:50:fd:4e:3c:de:
+ e2:e5:78:dc:8e:07:a5:69:55:13:2b:8f:ae:21:00:42:85:ff:
+ b6:b1:2b:69:08:40:5a:25:8c:fe:57:7f:b1:06:b0:72:ff:61:
+ de:21:59:05:a8:1b:9e:c7:8a:08:ab:f5:bc:51:b3:36:68:0f:
+ 54:65:3c:8d:b7:80:d0:27:01:3e:43:97:89:19:89:0e:c5:01:
+ 2c:55:9f:b6:e4:c8:0b:35:f8:52:45:d3:b4:09:ce:df:73:98:
+ f5:4c:e4:5a:06:ac:63:4c:f8:4d:9c:af:88:fc:19:f7:77:ea:
+ ee:56:18:49:16:ce:62:66:d1:1b:8d:66:33:b5:dc:b1:25:b3:
+ 6c:81:e9:d0:8a:1d:83:61:49:0e:d9:94:6a:46:80:41:d6:b6:
+ 59:a9:30:55:3d:5b:d3:5b:f1:37:ec:2b:76:d0:3a:ac:b2:c8:
+ 7c:77:04:78
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNTYyOFoXDTI0MDgxODEzNTYyOFqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEASBsLsYn1b6883Sqg5VUEgBa0I5g5u58WySVzcsam
+cyEdGraZ/EdevK9kKQKcpdsVimVIPE+mzTVHqsbAOfWmiI8bbCZhThDX4rAgOmSS
+wdMqET4D4lD9Tjze4uV43I4HpWlVEyuPriEAQoX/trEraQhAWiWM/ld/sQawcv9h
+3iFZBagbnseKCKv1vFGzNmgPVGU8jbeA0CcBPkOXiRmJDsUBLFWftuTICzX4UkXT
+tAnO33OY9UzkWgasY0z4TZyviPwZ93fq7lYYSRbOYmbRG41mM7XcsSWzbIHp0Iod
+g2FJDtmUakaAQda2WakwVT1b01vxN+wrdtA6rLLIfHcEeA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..0b99d56936e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,41 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:43:27 2014 GMT
+ Next Update: Aug 18 13:43:27 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+Revoked Certificates:
+ Serial Number: 02
+ Revocation Date: Aug 21 13:43:27 2014 GMT
+ Signature Algorithm: sha256WithRSAEncryption
+ 24:86:73:8d:7f:55:15:d0:d6:8a:47:53:cf:97:f7:e5:3d:0b:
+ 4a:ea:fb:02:6a:2e:79:c6:b1:38:b2:ac:f0:c0:64:47:b0:3e:
+ ad:4e:2e:94:e6:64:ed:79:34:bd:74:c0:d4:3d:b9:a1:bb:38:
+ 89:5c:02:6a:ad:6b:dc:3b:64:34:6a:2d:4c:90:36:82:95:0c:
+ 19:88:e2:a3:bf:8e:1b:56:98:37:32:87:ed:f0:bd:dd:e2:0d:
+ f9:80:dc:f2:a5:b4:ee:d9:bb:83:fe:b8:3a:13:e0:da:fc:04:
+ 77:fb:ce:f9:c5:2a:54:a7:f0:34:09:2a:b2:3d:46:1b:48:e6:
+ e8:16:c7:a1:3c:88:8c:72:cd:cc:53:dc:f8:54:63:1f:b9:8b:
+ ea:2c:e5:26:c5:b4:a4:9f:8b:e1:6c:85:9b:c6:63:6f:2f:ae:
+ 18:c5:6a:23:f0:58:27:85:5c:0f:01:04:da:d2:8b:de:9e:ab:
+ 46:00:22:07:28:e1:ef:46:91:90:06:58:95:05:68:67:58:6e:
+ 67:a8:0b:06:1a:73:d9:04:18:c9:a3:e4:e3:d6:94:a3:e1:5c:
+ e5:08:1b:b3:9d:ab:3e:ea:20:b1:04:e5:90:e1:42:54:b2:58:
+ bb:51:1a:48:87:60:b0:95:4a:2e:ce:a0:4f:8c:17:6d:6b:4c:
+ 37:aa:4d:d7
+-----BEGIN X509 CRL-----
+MIIB5DCBzQIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNDMyN1oXDTI0MDgxODEzNDMyN1owFDASAgECFw0xNDA4MjExMzQz
+MjdaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACSGc41/VRXQ
+1opHU8+X9+U9C0rq+wJqLnnGsTiyrPDAZEewPq1OLpTmZO15NL10wNQ9uaG7OIlc
+Amqta9w7ZDRqLUyQNoKVDBmI4qO/jhtWmDcyh+3wvd3iDfmA3PKltO7Zu4P+uDoT
+4Nr8BHf7zvnFKlSn8DQJKrI9RhtI5ugWx6E8iIxyzcxT3PhUYx+5i+os5SbFtKSf
+i+FshZvGY28vrhjFaiPwWCeFXA8BBNrSi96eq0YAIgco4e9GkZAGWJUFaGdYbmeo
+CwYac9kEGMmj5OPWlKPhXOUIG7Odqz7qILEE5ZDhQlSyWLtRGkiHYLCVSi7OoE+M
+F21rTDeqTdc=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_expired.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..c9b3abb05a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/crl_expired.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Jul 21 19:45:56 2014 GMT
+ Next Update: Jul 21 20:45:56 2014 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:e8:6d:51:fc:0e:66:08:22:b2:4d:fb:da:7a:5f:4d:d1:a0:
+ 80:f0:18:f3:c5:ca:c7:05:6c:70:59:fa:d5:96:68:fa:c7:1d:
+ 7e:fb:53:3b:4a:8f:ed:bb:51:04:e8:fb:db:d7:b8:96:d9:e2:
+ 8d:bb:54:cc:11:60:c8:20:ea:81:28:5f:e1:eb:d6:8c:94:bf:
+ 42:e0:7f:a3:13:0c:76:05:f2:f0:34:98:a3:e8:64:74:4c:cb:
+ bf:39:bb:fa:d5:2d:72:02:d1:fa:56:15:59:12:b7:ff:a3:cc:
+ c9:d6:14:ca:4a:1e:0b:b4:47:cf:58:b0:e5:24:d2:21:71:0d:
+ 2d:09:77:5c:2f:ef:40:f8:74:90:03:cc:37:2e:ea:6a:25:59:
+ c0:bf:48:90:00:55:9c:db:bf:1f:f0:7b:b6:5a:90:94:b6:8d:
+ 7c:7d:bb:2d:11:5f:0c:f5:4a:9b:c5:ed:ab:e3:fd:35:c8:76:
+ 3b:2e:41:cb:df:76:b5:f4:e9:05:72:f6:56:7a:fc:34:07:d6:
+ a2:55:eb:7c:58:33:5b:9d:3e:b2:03:89:01:c6:d1:54:75:1a:
+ 5c:73:3f:5e:2e:fd:3b:38:ed:d4:e1:fa:ec:ff:84:f0:55:ee:
+ 83:e0:f0:13:97:e7:f0:55:8c:00:a3:1a:31:e4:31:9e:68:d0:
+ 6d:3e:81:b0
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDcyMTE5NDU1NloXDTE0MDcyMTIwNDU1NlqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEAFOhtUfwOZggisk372npfTdGggPAY88XKxwVscFn6
+1ZZo+scdfvtTO0qP7btRBOj729e4ltnijbtUzBFgyCDqgShf4evWjJS/QuB/oxMM
+dgXy8DSYo+hkdEzLvzm7+tUtcgLR+lYVWRK3/6PMydYUykoeC7RHz1iw5STSIXEN
+LQl3XC/vQPh0kAPMNy7qaiVZwL9IkABVnNu/H/B7tlqQlLaNfH27LRFfDPVKm8Xt
+q+P9Nch2Oy5By992tfTpBXL2Vnr8NAfWolXrfFgzW50+sgOJAcbRVHUaXHM/Xi79
+Ozjt1OH67P+E8FXug+DwE5fn8FWMAKMaMeQxnmjQbT6BsA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/expired.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/expired.pem
new file mode 100644
index 00000000000..e1d2ceb8de8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/expired.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfzCCAmegAwIBAgIBEDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzEwMTYwMDAwWhcNMTQwNzE2MTYwMDAwWjBtMRAwDgYDVQQD
+EwdleHBpcmVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAU
+BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG
+EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPFSQZlHvJpi3dmA
+1X5U1qaUN/O/EQy5IZ5Rw+cfFHWOZ84EsLZxehWyqDZRH49Rg06xSYdO2WZOopP8
+OnUVCLGL819K83ikZ5sCbvB/gKCSCenwveEN992gJfs70HaZfiJNC7/cFigSb5Jg
+5G77E1/Uml4hIThfYG2NbCsTuP/P4JLwuzCkfgEUWRbCioMPEpIpxQw2LCx5DCy6
+Llhct0Hp14N9dZ4nA1h1621wOckgGJHw9DXdt9rGzulY1UgOOPczyqT08CdpaVxK
+VzrJCcUxfUjhO4ukHz+LBFQY+ZEm+tVboDbinbiHxY24urP46/u+BwRvBvjOovJi
+NVUh5GsCAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEw
+DQYJKoZIhvcNAQEFBQADggEBAG3rRSFCSG3hilGK9SMtTpFnrquJNlL+yG0TP8VG
+1qVt1JGaDJ8YUc5HXXtKBeLnRYii7EUx1wZIKn78RHRdITo5OJvlmcwwh0bt+/eK
+u9XFgR3z35w5UPr/YktgoX39SOzAZUoorgNw500pfxfneqCZtcRufVvjtk8TUdlN
+lcd2HfIxtUHWJeTcVM18g0JdHMYdMBXDKuXOW9VWLIBC2G6nAL/8SZJtUaDllPb4
+NisuIGjfjGgNxMpEXn+sQjFTupAoJru21OtAgERWFJhKQ0hbO0kucEPKEfxHDBVG
+dKSRIl6b0XSDLfxEXPv5ZhdrK4KEw1dYYXySvIVXtn0Ys38=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA8VJBmUe8mmLd2YDVflTWppQ3878RDLkhnlHD5x8UdY5nzgSw
+tnF6FbKoNlEfj1GDTrFJh07ZZk6ik/w6dRUIsYvzX0rzeKRnmwJu8H+AoJIJ6fC9
+4Q333aAl+zvQdpl+Ik0Lv9wWKBJvkmDkbvsTX9SaXiEhOF9gbY1sKxO4/8/gkvC7
+MKR+ARRZFsKKgw8SkinFDDYsLHkMLLouWFy3QenXg311nicDWHXrbXA5ySAYkfD0
+Nd232sbO6VjVSA449zPKpPTwJ2lpXEpXOskJxTF9SOE7i6QfP4sEVBj5kSb61Vug
+NuKduIfFjbi6s/jr+74HBG8G+M6i8mI1VSHkawIDAQABAoIBAGAO1QvVkU6HAjX8
+4X6a+KJwJ2F/8aJ14trpQyixp2wv1kQce9bzjpwqdGjCm+RplvHxAgq5KTJfJLnx
+UbefOsmpoqOQ6x9fmdoK+uwCZMoFt6qGaJ63960hfVzm71D2Qk4XCxFA4xTqWb0T
+knpWuNyRfSzw1Q9ib7jL7X2sKRyx9ZP+1a41ia/Ko6iYPUUnRb1Ewo10alYVWVIE
+upeIlWqv+1DGfda9f34pGVh3ldIDh1LHqaAZhdn6sKtcgIUGcWatZRmQiA5kSflP
+VBpOI2c2tkQv0j5cPGwD7GGaJ2aKayHG0EwnoNmxCeR0Ay3MO0vBAsxn7Wy6yqrS
+EfkYhFkCgYEA/OA2AHFIH7mE0nrMwegXrEy7BZUgLRCRFWTjxwnCKFQj2Uo2dtYD
+2QQKuQWeiP+LD2nHj4n1KXuSJiB1GtmEF3JkYV4Wd7mPWEVNDHa0G8ZndquPK40s
+YSjh9u0KesUegncBFfIiwzxsk9724iaXq3aXOexc0btQB2xltRzj6/0CgYEA9E2A
+QU6pnCOzGDyOV7+TFr0ha7TXaMOb5aIVz6tJ7r5Nb7oZP9T9UCdUnw2Tls5Ce5tI
+J23O7JqwT4CudnWnk5ZtVtGBYA23mUryrgf/Utfg08hU2uRyq9LOxVaVqfV/AipN
+62GmfuxkK4PatOcAOhKqmS/zGfZqIg7V6rtX2ocCgYEAlY1ogpR8ij6mvfBgPmGr
+9nues+uBDwXYOCXlzCYKTN2OIgkQ8vEZb3RDfy9CllVDgccWfd6iPnlVcvUJLOrt
+gwxlL2x8ryvwCc1ahv+A/1g0gmtuDdy9HW0XTnjcFMWViKUm4DrGsl5+/GkF67PV
+SVOmllwifOthpjJGaHmAlmUCgYB6EFMZzlzud+PfIzqX20952Avfzd6nKL03EjJF
+rbbmA82bGmfNPfVHXC9qvRTWD76mFeMKWFJAY9XeE1SYOZb+JfYBn/I9dP0cKZdx
+nutSkCx0hK7pI6Wr9kt7zBRBdDj+cva1ufe/iQtPtrTLGHRDj9oPaibT/Qvwcmst
+umdd9wKBgQDM7j6Rh7v8AeLy2bw73Qtk0ORaHqRBHSQw87srOLwtfQzE92zSGMj+
+FVt/BdPgzyaddegKvJ9AFCPAxbA8Glnmc89FO7pcXn9Wcy+ZoZIF6YwgUPhPCp/4
+r9bKuXuQiutFbKyes/5PTXqbJ/7xKRZIpQCvxg2syrW3hxx8LIx/kQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/fts.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/fts.js
@@ -0,0 +1,18 @@
+
+function queryIDS( coll, search, filter, extra ){
+    var cmd = { search : search };
+    if ( filter )
+        cmd.filter = filter;
+    if ( extra )
+        Object.extend( cmd, extra );
+    // Note: assigned without 'var', so the last raw command result is created as a
+    // global and remains inspectable after the call.
+    lastCommandResult = coll.runCommand( "text", cmd );
+
+    return getIDS( lastCommandResult );
+}
+
+function getIDS( commandResult ){
+    if ( ! ( commandResult && commandResult.results ) )
+        return [];
+
+    return commandResult.results.map( function(z){ return z.obj._id; } );
+}
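+
+// Hypothetical usage (the collection, index, and filter are illustrative):
+//   db.articles.ensureIndex({ body: "text" });
+//   var ids = queryIDS(db.articles, "coffee", { published: true });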
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/geo_near_random.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..248f5e49a6c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/geo_near_random.js
@@ -0,0 +1,101 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+    assert.eq(this.nPts, 0, "insertPts already called");
+ this.nPts = nPts;
+
+ var bulk = this.t.initializeUnorderedBulkOp();
+ for (var i=0; i<nPts; i++){
+ bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) });
+ }
+ assert.writeOK(bulk.execute());
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+        this.t.ensureIndex({loc: '2d'}, indexBounds);
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+        var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0];
+        var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1];
+        var dS = short[i].obj ? short[i].dis : 1;
+
+        var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0];
+        var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1];
+        var dL = long[i].obj ? long[i].dis : 1;
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+    assert.neq(this.nPts, 0, "insertPts not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+        cmd.num = i;
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+            print("*** failed while comparing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
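+// A minimal sketch of how this harness is typically driven (the name and
+// counts are illustrative):
+//   var test = new GeoNearRandomTest("geo_near_random_sketch");
+//   test.insertPts(50);
+//   for (var i = 0; i < 10; i++) {
+//       test.testPt(test.mkPt());
+//   }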
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/host_ipaddr.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/host_ipaddr.js
new file mode 100644
index 00000000000..7db1417e977
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/host_ipaddr.js
@@ -0,0 +1,38 @@
+// Returns a non-localhost IP address of the host running the mongo shell process.
+function get_ipaddr() {
+ // set temp path, if it exists
+ var path = "";
+ try {
+ path = TestData.tmpPath;
+ if (typeof path == "undefined") {
+ path = "";
+ } else if (path.slice(-1) != "/") {
+ // Terminate path with / if defined
+ path += "/";
+ }
+ }
+ catch (err) {}
+
+ var ipFile = path+"ipaddr.log";
+ var windowsCmd = "ipconfig > "+ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipAddr = null;
+ var hostType = null;
+
+ try {
+ hostType = getBuildInfo().sysInfo.split(' ')[0];
+
+ // os-specific methods
+ if (hostType == "windows") {
+ runProgram('cmd.exe', '/c', windowsCmd);
+ ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
+ } else {
+ runProgram('bash', '-c', unixCmd);
+ ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
+ }
+ }
+ finally {
+ removeFile(ipFile);
+ }
+ return ipAddr;
+}
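+
+// Hypothetical usage (the mongod options shown are illustrative):
+//   var ip = get_ipaddr();                          // e.g. "10.4.120.23"
+//   var conn = MongoRunner.runMongod({ bind_ip: ip });
+//   // ... connect via ip + ":" + conn.port instead of localhost ...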
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key1 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key2 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e6aca6a217d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDgTCCAmmgAwIBAgIBBTANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBvMRIwEAYDVQQD
+EwkxMjcuMC4wLjExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEW
+MBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNV
+BAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiqQNGgQggL8S
+LlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9wd0VNuD6+Ycg1mBbopO+M/K/ZWv8c
+7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9kVGRb2bNAfV2bC5/UnO1ulQdHoIB
+p3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8rwNNFvooMRg8yq8tq0qBkVhh85kct
+HHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqvI/Y5eIeZLhdIzAv37kolr8AuyqIR
+qcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZxoZN9Jv7x5LyiA+ijtQ+5aI/kMPqG
+nox+/bNFCQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAu
+MTANBgkqhkiG9w0BAQUFAAOCAQEAVJJNuUVzMRaft17NH6AzMSTiJxMFWoafmYgx
+jZnzA42XDPoPROuN7Bst6WVYDNpPb1AhPDco9qDylSZl0d341nHAuZNc84fD0omN
+Mbqieu8WseRQ300cbnS8p11c9aYpO/fNQ5iaYhGsRT7pnLs9MIgR468KVjY2xt49
+V0rshG6RxZj83KKuJd0T4X+5UeYz4B677y+SR0aoK2I2Sh+cffrMX2LotHc2I+JI
+Y9SDLvQT7chD9GzaWz634kmy3EEY0LreMm6AxhMOsr0lbZx5O8wLTScSjKARJ6OH
+nPxM1gYT07mkNmfyEnl1ChAN0MPgcLHQqEfe7x7ZQSbAv2gWfA==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAiqQNGgQggL8SLlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9w
+d0VNuD6+Ycg1mBbopO+M/K/ZWv8c7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9
+kVGRb2bNAfV2bC5/UnO1ulQdHoIBp3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8r
+wNNFvooMRg8yq8tq0qBkVhh85kctHHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqv
+I/Y5eIeZLhdIzAv37kolr8AuyqIRqcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZx
+oZN9Jv7x5LyiA+ijtQ+5aI/kMPqGnox+/bNFCQIDAQABAoIBAQAMiUT+Az2FJsHY
+G1Trf7Ba5UiS+/FDNNn7cJX++/lZQaOj9BSRVFzwuguw/8+Izxl+QIL5HlWDGupc
+tJICWwoWIuVl2S7RI6NPlhcEJF7hgzwUElnOWBfUgPEsqitpINM2e2wFSzHO3maT
+5AoO0zgUYK+8n9d74KT9CFcLqWvyS3iksK/FXfCZt0T1EoJ4LsDjeCTfVKqrku2U
++fCnZZYNkrgUI7Hku94EJfOh462V4KQAUGsvllwb1lfmR5NR86G6VX6oyMGctL5e
+1M6XQv+JQGEmAe6uULtCUGh32fzwJ9Un3j2GXOHT0LWrVc5iLuXwwzQvCGaMYtKm
+FAIDpPxhAoGBAMtwzpRyhf2op/REzZn+0aV5FWKjeq69Yxd62RaOf2EetcPwvUOs
+yQXcP0KZv15VWU/XhZUmTkPf52f0YHV/b1Sm6wUOiMNQ4XpnRj2THf0N7RS4idMm
+VwtMf1pxqttxQVKPpOvPEiTyIh2Nx/juyfD4CWkOVNTvOCd1w+av6ukNAoGBAK51
+gIXDuwJ2e5h3IJyewN/HOZqlgPKyMjnACaeXQ5wPJSrz4+UkJkuXT2dYKhv6u7K/
+GtucTdvBIJeq61+LjjkYk7OVDzoqP/uWU7p1y7gU9LZq+7tgq7r8cgeaC3IBQe7X
+jdFPEy1+zAEBh6MfFjnLZ2Kop9qbH3cNih/g9pTtAoGBAJ8dmdUtRXNByCsa7Rv2
+243qiDlf14J4CdrBcK1dwm75j/yye7VEnO2Cd8/lZHGpm3MBBC/FiA06QElkL1V2
+2GKDMun/liP9TH1p7NwYBqp3i+ha9SE6qXXi3PCmWpXLnOWwB7OPf4d6AgjPbYpb
+aYKY3PNYDC2G9IqYZyI0kSy5AoGBAJ5Fe5PfPom9c+OeL7fnTpO16kyiWZnUkDxU
+PG4OjQfHtbCCEv6PDS8G1sKq+Yjor+A5/+O8qeX0D92I8oB720txQI5rbKUYL3PP
+raY7t9YJLPlRlY8o5KN+4vSCjF+hRG+qnr6FPqDHp8xB1wvl6AQGxIR8/csVcDZR
+0j2ZmhsBAoGAO1Cpk/hWXOLAhSj8P8Q/+3439HEctTZheVBd8q/TtdwXocaZMLi8
+MXURuVTw0GtS9TmdqOFXzloFeaMhJx6TQzZ2aPcxu95b7RjEDtVHus3ed2cSJ2El
+AuRvFT2RCVvTu1mM0Ti7id+d8QBcpbIpPjNjK2Wxir/19gtEawlqlkA=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..480300f29e1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBBjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB5MRwwGgYDVQQD
+ExNzYW50ZXN0aG9zdG5hbWUuY29tMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoT
+B01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZ
+b3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJKOLTNEPv08IVmhfkv6Xq1dT6pki76ggpJ7UpwdUSsTsWDKO2o1c7wnzEjfhYQ+
+CtlEvbYyL3O7f8AaO15WJdi53SMuWS+QfCKs6b0symYbinSXlZGb4oZYFSrodSxH
++G8u+TUxyeaXgTHowMWArmTRi2LgtIwXwwHJawfhFDxji3cSmLAr5YQMAaXUynq3
+g0DEAGMaeOlyn1PkJ2ZfJsX2di+sceKb+KK1xT+2vUSsvnIumBCYqMhU6y3WjBWK
+6WrmOcsldWo4IcgyzwVRlZiuuYoe6ZsxZ4nMyTdYebALPqgkt8QVXqkgcjWK8F18
+nuqWIAn1ISTjj73H4cnzYv0CAwEAAaM8MDowOAYDVR0RBDEwL4INKi5leGFtcGxl
+LmNvbYIJMTI3LjAuMC4xgghtb3JlZnVuIYIJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+BQUAA4IBAQA5M3U4wvQYI3jz/+Eh4POrJAs9eSRGkUhz1lP7D6Fcyp+BbbXB1fa9
+5qpD4bp1ZoDP2R2zca2uwwfd3DTWPbmwFMNqs2D7d0hgX71Vg9DCAwExFjoeRo44
+cCE9kakZtE3kT/tiH6SpYpnBa3dizxTmiY48z212Pw813SSXSPMN1myx5sMJof5I
+whJNQhSQOw6WHw5swZJZT4FkzxjQMrTWdF6r0d5EU9K2WWk5DTwq4QaysplB5l0H
+8qm+fnC6xI+2qgqMO9xqc6qMtHHICXtdUOup6wj/bdeo7bAQdVDyKlFKiYivDXvO
+RJNp2cwsBgxU+qdrtOLp7/j/0R3tUqWb
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAko4tM0Q+/TwhWaF+S/perV1PqmSLvqCCkntSnB1RKxOxYMo7
+ajVzvCfMSN+FhD4K2US9tjIvc7t/wBo7XlYl2LndIy5ZL5B8IqzpvSzKZhuKdJeV
+kZvihlgVKuh1LEf4by75NTHJ5peBMejAxYCuZNGLYuC0jBfDAclrB+EUPGOLdxKY
+sCvlhAwBpdTKereDQMQAYxp46XKfU+QnZl8mxfZ2L6xx4pv4orXFP7a9RKy+ci6Y
+EJioyFTrLdaMFYrpauY5yyV1ajghyDLPBVGVmK65ih7pmzFniczJN1h5sAs+qCS3
+xBVeqSByNYrwXXye6pYgCfUhJOOPvcfhyfNi/QIDAQABAoIBADqGMkClwS2pJHxB
+hEjc+4/pklWt/ywpttq+CpgzEOXN4GiRebaJD+WUUvzK3euYRwbKb6PhWJezyWky
+UID0j/qDBm71JEJdRWUnfdPAnja2Ss0Sd3UFNimF5TYUTC5ZszjbHkOC1WiTGdGP
+a+Oy5nF2SF4883x6RLJi963W0Rjn3jIW9LoLeTgm9bjWXg3iqonCo3AjREdkR/SG
+BZaCvulGEWl/A3a7NmW5EGGNUMvzZOxrqQz4EX+VnYdb7SPrH3pmQJyJpAqUlvD5
+y7pO01fI0wg9kOWiIR0vd3Gbm9NaFmlH9Gr2oyan3CWt1h1gPzkH/V17rZzVYb5L
+RnjLdyECgYEA6X16A5Gpb5rOVR/SK/JZGd+3z52+hRR8je4WhXkZqRZmbn2deKha
+LKZi1eVl11t8zitLg/OSN1uZ/873iESKtp/R6vcGcriUCd87cDh7KTyW/7ZW5jdj
+o6Y3Liai3Xrf6dL+V2xYw964Map9oK9qatYw/L+Ke6b9wbGi+hduf1kCgYEAoK8n
+pzctajS3Ntmk147n4ZVtcv78nWItBNH2B8UaofdkBlSRyUURsEY9nA34zLNWI0f3
+k59+cR13iofkQ0rKqJw1HbTTncrSsFqptyEDt23iWSmmaU3/9Us8lcNGqRm7a35V
+Km0XBFLnE0mGFGFoTpNt8oiR4WGASJPi482xkEUCgYEAwPmQn2SDCheDEr2zAdlR
+pN3O2EwCi5DMBK3TdUsKV0KJNCajwHY72Q1HQItQ6XXWp7sGta7YmOIfXFodIUWs
+85URdMXnUWeWCrayNGSp/gHytrNoDOuYcUfN8VnDX5PPfjyBM5X7ox7vUzUakXSJ
+WnVelXZlKR9yOOTs0xAMpjkCgYAbF61N6mXD5IOHwgajObsrM/CyVP/u4WDJ0UT0
+Zm1pJbc9wgCauQSUfiNhLpHmoc5CQJ4jy96b3+YJ+4OnPPMSntPt4FFV557CkWbQ
+M8bWpLZnZjhixP4FM9xRPA2r8WTCaRifAKnC1t+TRvBOe2YE6aK+I/zEzZW9pwG4
+ezQXKQKBgQAIBSJLa6xWbfbzqyPsvmRNgiEjIamF7wcb1sRjgqWM6sCzYwYv8f5v
+9C4YhNXEn+c5V2KevgYeg6iPSQuzEAfJx64QV7JD8kEBf5GNETnuW45Yg7KwKPD6
+ZCealfpy/o9iiNqbWqDNND91pj2/g5oZnac3misJg5tGCJbJsBFXag==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockkrb5.conf b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockservice.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockuser.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/not_yet_valid.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/not_yet_valid.pem
new file mode 100644
index 00000000000..7c021c0becd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/not_yet_valid.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIBETANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMjAwNzE3MTYwMDAwWhcNMjUwNzE3MTYwMDAwWjBzMRYwFAYDVQQD
+Ew1ub3RfeWV0X3ZhbGlkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdv
+REIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQsw
+CQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2gF+Fo
+CeBKVlPyDAaEA7cjK75CxnzQy+oqw1j/vcfe/CfKL9MvDDXauR/9v1RRlww5zlxQ
+XJJtcMJtxN1EpP21cHrHCpJ/fRsCdMfJdD9MO6gcnclEI0Odwy5YI/57rAgxEuDC
+7z4d+M6z7PLq8DIwvRuhAZVTszeyTsCCkwfTJ/pisD2Ace75pS37t/ttQp+kQ+Vl
+QrfccHYxrScQ9i0JqBfrTULDl6ST76aINOaFKWqrLLkRUvE6pEkL/iP6xXUSKOsm
+uyc0yb0PK5Y/IVdrzwWUkabWEM27RAMH+CAx2iobk6REj0fsGySBzT2CaETZPjck
+vn/LYKqr+CvYjc8CAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcu
+MC4wLjEwDQYJKoZIhvcNAQEFBQADggEBADw37jpmhj/fgCZdF1NrDKLmWxb4hovQ
+Y9PRe6GsBOc1wH8Gbe4UkYAE41WUuT3xW9YpfCHLXxC7da6dhaBISWryX7n72abM
+xbfAghV3je5JAmC0E/OzQz8tTgENxJN/c4oqCQ9nVOOLjwWiim5kF0/NY8HCc/Sg
+OG9IdseRX72CavDaPxcqR9/5KKY/pxARMeyy3/D0FIB1Fwu5h9vjHEi5fGOqcizf
+S1KHfzAmTxVtjw6HWRGKmkPX0W0/lURWVkKRxvC8KkJIeKx3fl9U1PqCw0AVi5d/
+whYn4qHNFFp4OiVzXq3b5YoBy0dlHUePCIPT2GkGlV4NQKosZMJUkKo=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzaAX4WgJ4EpWU/IMBoQDtyMrvkLGfNDL6irDWP+9x978J8ov
+0y8MNdq5H/2/VFGXDDnOXFBckm1wwm3E3USk/bVwescKkn99GwJ0x8l0P0w7qByd
+yUQjQ53DLlgj/nusCDES4MLvPh34zrPs8urwMjC9G6EBlVOzN7JOwIKTB9Mn+mKw
+PYBx7vmlLfu3+21Cn6RD5WVCt9xwdjGtJxD2LQmoF+tNQsOXpJPvpog05oUpaqss
+uRFS8TqkSQv+I/rFdRIo6ya7JzTJvQ8rlj8hV2vPBZSRptYQzbtEAwf4IDHaKhuT
+pESPR+wbJIHNPYJoRNk+NyS+f8tgqqv4K9iNzwIDAQABAoIBAFWTmjyyOuIArhrz
+snOHv7AZUBw32DmcADGtqG1Cyi4DrHe22t6ORwumwsMArP8fkbiB2lNrEovSRkp0
+uqjH5867E1vVuJ2tt1hlVkrLmbi6Nl3JwxU/aVm7r7566kgAGmGyYsPt/PmiKamF
+Ekkq49pPlHSKNol6My0r5UCTVzO6uwW7dAa4GOQRI7bM7PVlxRVVeNzPH3yOsTzk
+smrkRgf8HbjtY7m/EHG281gu14ZQRCqzLshO2BtWbkx9dMXnNU5dRRaZ8Pe8XN0Z
+umsStcX6So6VFAqlwknZTi1/sqyIuQLfE+S9DocVQkvKFUgKpFddK8Nmqc8xPCKt
+UwR9hEECgYEA9kZ5KmUbzxQrF8Kn9G18AbZ/Cf6rE9fhs/J8OGcuuJ9QTjPO7pxV
+T7lGrIOX3dVu3+iHrYXZUZv+UTOePWx+ghqJ8ML7RdVsxAWMqh+1J0eBJKIdc9mt
+0hGkLEyyBbAlfNmvw8JugTUeZH2gA+VK9HoMTAjD+LvH164rrktauKECgYEA1b6z
+lZypAbAqnuCndcetcgatdd/bYNH5WWTgdZHqInt3k94EsUEHFNMQUbO+FNkOJ4qJ
+Jp7xrqkOUX+MPrzV5XYVapamlht9gvUtyxGq7DYndlq4mIsN5kReH++lqONBnWoG
+ZlbxvadkvPo+bK003hsl+E4F8X7xUssGGLvygG8CgYEAm/yLJkUgVgsqOER86R6n
+mtYipQv/A/SK6tU9xOPl/d46mS3LderjRjnN/9rhyAo1zfCUb14GBeDONlSBd9pO
+Ts3MbQiy6sqBt67kJ6UpspVhwPhFu2k25YVy/PQfFec591hSMaXnJEOm2nOPdKg4
+z5y2STqMFfGqZHvXAvCLp8ECgYA8oVGTmNKf9fbBBny5/iAG/jnp+8vg1O7kGqdI
+8lD14wvyV8IA/a8iixRP+Kpsg31uXe+1ktR/dNjo6UNA8JPD+RDuITmzzqx1n1KU
+DbjsNBhRjD5cluUkcjQ43uOg2oXcPxz9nqAH6hm7OUjHzwH2FsFYg9lPvXB6ybg6
+/+Uz5QKBgBxvTtLsZ3Cvvb3qezn4DdpLjlsrT6HWaTGqwEx8NYVBTFX/lT8P04tv
+NqFuQsDJ4gw0AZF7HqF49qdpnHEJ8tdHgBc/xDLFUMuKjON4IZtr0/j407K6V530
+m4q3ziHOu/lORDcZTz/YUjEzT8r7Qiv7QusWncvIWEiLSCC2dvvb
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/parallelTester.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..8c44d2df553
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/parallelTester.js
@@ -0,0 +1,259 @@
+/**
+ * The ParallelTester class is used to run more than one test concurrently.
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
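+
+ // A minimal usage sketch (illustrative only; "t1" and "mycoll" are
+ // hypothetical names, and the assembled events are meant to be handed
+ // to EventGenerator.dispatch in a worker thread):
+ //     var eg = new EventGenerator("t1", "mycoll", 50);
+ //     eg.addInsert({ x: 1 });
+ //     eg.addCheckCount(1, { x: 1 });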
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
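+
+ // A minimal usage sketch (illustrative only; assumes a connected shell
+ // whose write mode satisfies the constructor's assertion):
+ //     var pt = new ParallelTester();
+ //     pt.add(function(n) { assert.eq(4, n * 2); }, [2]);
+ //     pt.add(function() { assert(true); });
+ //     pt.run("example parallel assertions", true /* newScopes */);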
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js", // log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) ||
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/password_protected.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..25e47bc2402
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBCTANBgkqhkiG9w0BAQUFADB4MRswGQYDVQQDExJwYXNz
+d29yZF9wcm90ZWN0ZWQxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29E
+QjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJ
+BgNVBAYTAlVTMB4XDTE0MDcxNzE2MDAwMFoXDTIwMDcxNzE2MDAwMFoweDEbMBkG
+A1UEAxMScGFzc3dvcmRfcHJvdGVjdGVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNV
+BAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5l
+dyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALT4r3Hcou2auIOHeihBSjk4bKQTVqI6r/stnkul359SRfKuzVA9gMQaRRDi
+MJoxczHJzS2FX+wElzBt2EUhfu3qpUJ4gJw7H4WjLx+mNnj/+6b4HUO4eRzH5hTE
+A+qgDH40qYjFDEjiARvybWo3IlDLeI/uFwlyUj5PZBUBc1LBBzNtCBfJ2MmHLhIx
+jzTFhkJZll673LL6BPHtJclXCazqKUZDLqObW4Ei6X4hdBOdC8v8Q6GMgC4BxLe0
+wsOpKYYeM3il4BtfiqDQB5ZPG0lgo1Y7OOyFHFXBA7oNkK8lykhdyH4iLt5L9mWo
+VKyZ79VqSODFuCqWo8n8kUTgA/0CAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAntxk8a0HcuPG8Fdjckp6WL+HKQQnUKdvSk06rPF0SHpN
+Ma4eZcaumROdtAYnPKvtbpq3DRCQlj59dlWPksEcYiXqf56TgcyAz5K5g5z9RbFi
+ArvAXJNRcDz1080NWGBUTPYyiKWR3PhtlYhJZ4r7fQIWLv4mifXHViw2roXXhsAY
+ubk9HOtrqE7x6NJXgR24aybxqI6TfAKfM+LJNtMwMFrPC+GHnhqMOs/jHJS38NIB
+TrKA63TdpYUroVu23/tGLQaJz352qgF4Di91RkUfnI528goj57pX78H8KRsSNVvs
+KHVNrxtZIez+pxxjBPnyfCH81swkiAPG9fdX+Hcu5A==
+-----END CERTIFICATE-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6y3b7IxcANECAggA
+MB0GCWCGSAFlAwQBAgQQrTslOKC0GZZwq48v7niXYQSCBNBNKsTN7fyw60/EEDH0
+JUgxL83Wfb7pNP97/lV5qiclY1mwcKz44kXQaesFTzhiwzAMOpbI/ijEtsNU25wV
+wtTgjAC3Em/+5/ygrmAu7hgacIRssspovmsgw029E9iOkyBd1VIrDVMi7HLHf0iU
+2Zq18QF20az2pXNMDipmVJkpc9NvjSdqka5+375pJuisspEWCDBd11K10jzCWqB5
+q3Rm1IIeq+mql6KT1rJcUoeE0facDc9GDYBiF/MfIKQ3FrZy/psqheCfL1UDUMyc
+mnm9GJO5+bCuHkg8ni0Zo5XXsf2VEFt0yt6lSucoOP43flucQaHnFKcn+5DHjDXv
+S6Eb5wEG9qWtzwWy/9DfRbkj6FxUgT3SFgizo/uLmdqFCJCnYkHUD1OuYCDmoIXP
+VTinwgK4lO/vrGfoPQrgJmdlnwHRWYjlB8edMCbmItaj2Esh3FBS12y976+UT0Sk
+8n5HsZAEYScDyNArVhrLUZRgF+r+bgZ28TDFO0MISPCAbZjhvq6lygS3dEmdTUW3
+cFDe1deNknWxZcv4UpJW4Nq6ckxwXBfTB1VFzjp7/vXrK/Sd9t8zi6vKTO8OTqc4
+KrlLXBgz0ouP/cxhYDykUrKXE2Eb0TjeAN1txZWo3fIFzXUvDZCphQEZNUqsFUxH
+86V2lwqVzKrFq6UpTgKrfTw/2ePQn9dQgd7iFWDTWjRkbzA5aAgTSVP8xQRoIOeQ
+epXtP9202kEz3h28SZYK7QBOTTX9xNmV/dzDTsi9nXZ6KtsP/aGFE5hh95jvESx/
+wlOBAPW4HR33rSYalvQPE7RjjLZHOKuYIllUBGlTOfgdA+WUXR3KxiLNPdslPBPV
++O6aDyerhWoQwE7TFwhP/FpxL/46hOu4iq4fgqfjddBTq8z5jG3c3zzogDjoDzBF
+LEQDcbenUCGbEQ7zxXsXtr3QinJ+aAejDO38hp1h9ROb5LF53/9H2j/16nby/jPX
+7kp2weRSKGJ0B6AVuS9pTsQz4+E3icsIgBWSU6qtcUz2GO2QxnFuvT9LEVnyMNN2
+IKMIEKi2FsUMddHGXLULTANlzUMocdHrd5j81eqcFPhMOFOiHpgwiwxqZyBYOLRl
+Fe7x5dLVWoLgjJagZj8uYnJbExDsfFLjEx8p4Z+rejJIC5CqZLbz9sDgCtIL+92k
++x4mlT1Rfmz9pU+RQqik83nFFRBGWxeW9iWWEgocWtmezvnK6E241v78zkqxNkvF
+JJo7BsBw7DiEHEfLhBZYuqV2q6+kwqgYrzyGIwAJkBGrkYfalVzgR+3/uN04h005
+M3jQRpSkDVGYr3JKEAlh3Sc+JD9VPbu6/RXNwy5mY67UCgWGaFwRqJE3DC9aKfNC
+OET8m8+8oQgFzhw3pNpENsgwR+Sx3K4q0GI3YwxT02pieBFNQaw53O3B3TtoCjkk
+UsuyIWqcLonwo4I3z0kjU3gEFN+0m4E4/A1DNt0J3rsKN+toCk1FqbxQg9xTZzXu
+hYmA3HMMwugzXmCanqBhmMsniPg+dRxCIfiHZhLuEpjKxZWcMWcW4M6l/wbM+LbE
+oDcTuI9ezfPTZ3xA8hNIHBT3MhuI7EJQnvKKvJDJeyX5sAtmSsSFqhEr8QZD8RgV
+5H9eOyUdfcWxLlstcq982V0oGg==
+-----END ENCRYPTED PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/server.pem
new file mode 100644
index 00000000000..d01b336fb86
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/server.pem
@@ -0,0 +1,58 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml server.pem
+#
+# General purpose server certificate file.
+-----BEGIN CERTIFICATE-----
+MIIEZDCCA0wCBAyW2DwwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjc0MVoXDTM5MDkyNzIzMjc0MVowbDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxDzANBgNVBAMMBnNlcnZl
+cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAME6pVjavnJIORzETE+Y
+UKRONMo0dRF1Jmib/zXylTssSJDGwrxL10vfCAyXSxJqQRpJraAbdrV01e0jdal3
+Y6jkXLqxv8rWuMDaFJUbOFPjz8Ad4JsvxSgJVSBeKRw4YcGC5U9B6lkuF8oZPq65
+nhLeHZliDL2LZCep8+8YCY3zPhpQ82huf4DkOMsbPxe0/Mo5r3Z3+BIMsGrKeVlY
+TUBReMLUPAGcuAkyxN+WV6wRjlXxUOzk0txmWTzzt2dx2XLGR/Ob8fLRSm4U471P
+7lRg8gaKzVXUcFQOA6KiM+aeIcDYlL/Z+5yyO/aGY1Xxt43MkrsAq2Afx3gKQYe9
+yukCAwEAAaOCAQkwggEFMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQW
+MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUPyEOAwSRUZaUE3lS4Nsq
+2KO0mMEwgYsGA1UdIwSBgzCBgKF4pHYwdDELMAkGA1UEBhMCVVMxETAPBgNVBAgM
+CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdNb25n
+b0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggQZ
+kSMSMB8GA1UdEQQYMBaCCWxvY2FsaG9zdIIJMTI3LjAuMC4xMA0GCSqGSIb3DQEB
+CwUAA4IBAQCTAoHLEMxJF7OFKTpuAub1y9ooiryn57agjiSsV8J5EiNahbwyr/cv
+HBD+TTvOfNV+RudScxKPl+opz3qAX8dDDbGAgUdvGeDI3UTiPOjD2okVrH8hKlA1
+/cTkBZNc6b3QZ4URoG3DkGDgM2P9/fNBp0G0QiYns2hMWy8mxxTuC3zGCB6q5A48
+lGxz0eNOHvKDwhAJBOeSVKc1wtmazviU6d5TH+WjWKO5ulbMiQq8R7U1gPcIFhk+
+937EfCiHE8VQOdWJ2XHeY7XitT57ukvi9PZYDYd2Jc0at2l69MYoFetV2jM+l60R
+KIsXzgkv8Hg+OYXl/du6oWJsGujJnryk
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDBOqVY2r5ySDkc
+xExPmFCkTjTKNHURdSZom/818pU7LEiQxsK8S9dL3wgMl0sSakEaSa2gG3a1dNXt
+I3Wpd2Oo5Fy6sb/K1rjA2hSVGzhT48/AHeCbL8UoCVUgXikcOGHBguVPQepZLhfK
+GT6uuZ4S3h2ZYgy9i2QnqfPvGAmN8z4aUPNobn+A5DjLGz8XtPzKOa92d/gSDLBq
+ynlZWE1AUXjC1DwBnLgJMsTfllesEY5V8VDs5NLcZlk887dncdlyxkfzm/Hy0Upu
+FOO9T+5UYPIGis1V1HBUDgOiojPmniHA2JS/2fucsjv2hmNV8beNzJK7AKtgH8d4
+CkGHvcrpAgMBAAECggEAKRC1ozSZ1N/Ug3CUZzYPrVuRjIepeZRUHVdJ3cU4QeMf
+aOVU7X+QueckZsigg8FhJJ0T8Trk95tk+4jVsLZWz8X4jxTHlewcR6ART78BMZLw
+y/uNiG2WkPOqy743LnAtFlsjMLzpeRp6o9DJqYh5N7lMwRQ9cOr21hcgaGfLYAc7
+Bbl/565RSeuw2fmSGkUMOAnPBoJvRfcsaZnEbciLY5ollLPmV00F1elEGGUyynlE
+3WiMuh2dtT2em7pdJJCcTxeKrF6Kdl0R1CYx8Ay/N7YMg3LgBY+SmR559qEy1IA5
+sMjjd79Jg+GNxj+/8V1zHePaNy1CeQLWoVb7OLZsAQKBgQDx0SpcWU/Z2P87CCqW
+Ym8jE3FFHHrz33UWmqvJ9dmVQSHZn3UaUw9WF9L8mS7t3fNroQ1xVOoB+GkKuWC5
+BnWqyOKNnIK8jNp62M26S6nyAhEwAgLK68YHrKZde9FZYE42SNlo/OQBULfJ47KF
+S0Pldz2dkQhzLPooeedonzu1mQKBgQDMj/EcQeHBAFJ8jfXgqRWAXbYzDtK1hdiy
+WtBZFVMGlomW7Mq11148GWLVVMpQX3eyPmv+t7KfI02q+oNkJsw7BUdjzsSuUtt1
+IULuznS9c+MpzUE37fXZkcUcbTDYXH8Bp0v+5u98E5rWgb4jbM3LHaKoyyMgHeAT
+qs6nzUZx0QKBgEy21aB8ePecfsQEgDY7PhS7m0iYk20ArA2ivfMROpi+E/YNP0I9
+rXTblASsuBWgPxQbAL++d4pueyx6yPOJ2BR5LF5Ok68rZDhqxUFftadf/oHjcW1c
+xPM0U4b+u9iF0+omzEohSFgeel/jC4R77ytB29s74plfYHEs0zv+oBupAoGAb4DK
+kXeL4dMWI1SSQOqJkNYD+aLL0ixqKRX9WYGZJw8pYn4iJKqHcET6LYSxXswbB6qk
+3UwubjYlINy6f/3x9v3yowHTDZ6WhyshO+CHm7kuituaAC2ShAzEH7GZHWaVyEXz
+07b3tW9OSDyictbc+ilmP5GyzM/cE3xYdvkau/ECgYEA8P4AV6KSjLswaMTYSc+X
+/8KdBhe6r6Eqs4FdACHeRixvgiafC7PFnnxtpSLAFDWoHI2EZEkTJ564TpAksXA3
+Z5zletQt0cPe3CQVvowrYgNUtjaUNG67BzOoUa4fln+jTg1LnAT3wIQZyef/1rKU
+yHx62fbnF1NBTj3vih8aIOk=
+-----END PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers.js
new file mode 100755
index 00000000000..b752b820eae
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers.js
@@ -0,0 +1,961 @@
+// Wrap whole file in a function to avoid polluting the global namespace
+(function() {
+
+_parsePath = function() {
+ var dbpath = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--dbpath" )
+ dbpath = arguments[ i + 1 ];
+
+ if ( dbpath == "" )
+ throw Error("No dbpath specified");
+
+ return dbpath;
+}
+
+_parsePort = function() {
+ var port = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--port" )
+ port = arguments[ i + 1 ];
+
+ if ( port == "" )
+ throw Error("No port specified");
+ return port;
+}
+
+connectionURLTheSame = function( a , b ){
+
+ if ( a == b )
+ return true;
+
+ if ( ! a || ! b )
+ return false;
+
+ if( a.host ) return connectionURLTheSame( a.host, b )
+ if( b.host ) return connectionURLTheSame( a, b.host )
+
+ if( a.name ) return connectionURLTheSame( a.name, b )
+ if( b.name ) return connectionURLTheSame( a, b.name )
+
+ if( a.indexOf( "/" ) < 0 && b.indexOf( "/" ) < 0 ){
+ a = a.split( ":" )
+ b = b.split( ":" )
+
+ if( a.length != b.length ) return false
+
+ if( a.length == 2 && a[1] != b[1] ) return false
+
+ if( a[0] == "localhost" || a[0] == "127.0.0.1" ) a[0] = getHostName()
+ if( b[0] == "localhost" || b[0] == "127.0.0.1" ) b[0] = getHostName()
+
+ return a[0] == b[0]
+ }
+ else {
+ var a0 = a.split( "/" )[0]
+ var b0 = b.split( "/" )[0]
+ return a0 == b0
+ }
+}
+
+assert( connectionURLTheSame( "foo" , "foo" ) )
+assert( ! connectionURLTheSame( "foo" , "bar" ) )
+
+assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) )
+assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) )
+
+createMongoArgs = function( binaryName , args ){
+ var fullArgs = [ binaryName ];
+
+ if ( args.length == 1 && isObject( args[0] ) ){
+ var o = args[0];
+ for ( var k in o ){
+ if ( o.hasOwnProperty(k) ){
+ if ( k == "v" && isNumber( o[k] ) ){
+ var n = o[k];
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10;
+ var temp = "-";
+ while ( n-- > 0 ) temp += "v";
+ fullArgs.push( temp );
+ }
+ }
+ else {
+ fullArgs.push( "--" + k );
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] );
+ }
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ return fullArgs;
+}
+
+
+MongoRunner = function(){}
+
+MongoRunner.dataDir = "/data/db"
+MongoRunner.dataPath = "/data/db/"
+MongoRunner.usedPortMap = {}
+
+MongoRunner.VersionSub = function(regex, version) {
+ this.regex = regex;
+ this.version = version;
+}
+
+// These patterns allow substituting the binary versions used for each
+// version string to support the dev/stable MongoDB release cycle.
+MongoRunner.binVersionSubs = [ new MongoRunner.VersionSub(/^latest$/, ""),
+ new MongoRunner.VersionSub(/^oldest-supported$/, "1.8"),
+ // To-be-updated when 2.8 becomes available
+ new MongoRunner.VersionSub(/^last-stable$/, "2.6"),
+ // Latest unstable and next stable are effectively the
+ // same release
+ new MongoRunner.VersionSub(/^2\.7(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^2\.8(\..*){0,1}/, "") ];
+
+MongoRunner.getBinVersionFor = function(version) {
+
+ // If this is a version iterator, iterate the version via toString()
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.toString();
+ }
+
+ // No version set means we use no suffix; this is *different* from "latest"
+ // since latest may be mapped to a different version.
+ if (version == null) version = "";
+ version = version.trim();
+ if (version === "") return "";
+
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.regex.test(version)) {
+ version = sub.version;
+ }
+ }
+
+ return version;
+}
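+
+// For example, given the substitutions above:
+//     MongoRunner.getBinVersionFor("latest")       // yields ""
+//     MongoRunner.getBinVersionFor("last-stable")  // yields "2.6"
+//     MongoRunner.getBinVersionFor("2.7.1")        // yields ""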
+
+MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+
+ versionA = MongoRunner.getBinVersionFor(versionA);
+ versionB = MongoRunner.getBinVersionFor(versionB);
+
+ if (versionA === "" || versionB === "") {
+ return versionA === versionB;
+ }
+
+ return versionA.startsWith(versionB) ||
+ versionB.startsWith(versionA);
+}
+
+MongoRunner.logicalOptions = { runId : true,
+ pathOpts : true,
+ remember : true,
+ noRemember : true,
+ appendOptions : true,
+ restart : true,
+ noCleanData : true,
+ cleanData : true,
+ startClean : true,
+ forceLock : true,
+ useLogFiles : true,
+ logFile : true,
+ useHostName : true,
+ useHostname : true,
+ noReplSet : true,
+ forgetPort : true,
+ arbiter : true,
+ noJournalPrealloc : true,
+ noJournal : true,
+ binVersion : true,
+ waitForConnect : true }
+
+MongoRunner.toRealPath = function( path, pathOpts ){
+
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {}
+ path = path.replace( /\$dataPath/g, MongoRunner.dataPath )
+ path = path.replace( /\$dataDir/g, MongoRunner.dataDir )
+ for( key in pathOpts ){
+ path = path.replace( RegExp( "\\$" + RegExp.escape(key), "g" ), pathOpts[ key ] )
+ }
+
+ // Relative path handling: the regex below detects Unix and Windows
+ // absolute paths (including drive letters and UNC paths); anything
+ // else is treated as relative to the data path.
+
+ if( ! path.match( /^(\/|\\|[A-Za-z]:)/ ) ){
+ if( path != "" && ! path.endsWith( "/" ) )
+ path += "/"
+
+ path = MongoRunner.dataPath + path
+ }
+
+ return path
+
+}
+
+MongoRunner.toRealDir = function( path, pathOpts ){
+
+ path = MongoRunner.toRealPath( path, pathOpts )
+
+ if( path.endsWith( "/" ) )
+ path = path.substring( 0, path.length - 1 )
+
+ return path
+}
+
+MongoRunner.toRealFile = MongoRunner.toRealDir
+
+MongoRunner.nextOpenPort = function(){
+
+ var i = 0;
+ while( MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] ) i++;
+ MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] = true
+
+ return 27000 + i
+
+}
+
+/**
+ * Returns an iterator object over an array of versions which yields successive versions on
+ * toString(), starting from a random initial position.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on toString().
+ *
+ * @param {Array.<String>|String|versionIterator} arr
+ */
+MongoRunner.versionIterator = function( arr, isRandom ){
+
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if( typeof arr == "string" ) return arr
+ if( arr.isVersionIterator ) return arr
+
+ if (isRandom == undefined) isRandom = false;
+
+ // Starting pos
+ var i = isRandom ? parseInt( Random.rand() * arr.length ) : 0;
+
+ return new MongoRunner.versionIterator.iterator(i, arr);
+}
+
+MongoRunner.versionIterator.iterator = function(i, arr) {
+
+ this.toString = function() {
+ i = ( i + 1 ) % arr.length
+ print( "Returning next version : " + i +
+ " (" + arr[i] + ") from " + tojson( arr ) + "..." );
+ return arr[ i ]
+ }
+
+ this.isVersionIterator = true;
+
+}
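+
+// A minimal usage sketch (illustrative only): each toString() yields the
+// next version, so an iterator can drive alternating binVersion restarts:
+//     var vi = MongoRunner.versionIterator(["2.6", "latest"]);
+//     MongoRunner.runMongod({ binVersion: vi });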
+
+/**
+ * Converts the args object by pairing all keys with their value and appending
+ * dash-dash (--) to the keys. The only exceptions to this rule are keys
+ * defined in MongoRunner.logicalOptions, which are ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+MongoRunner.arrOptions = function( binaryName , args ){
+
+ var fullArgs = [ "" ]
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if ( isObject( args ) || ( args.length == 1 && isObject( args[0] ) ) ){
+
+ var o = isObject( args ) ? args : args[0]
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion != "") {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function( option, value ){
+
+ if( ! o.binVersion ) return true
+
+ // Version 1.x options
+ if( o.binVersion.startsWith( "1." ) ){
+
+ return [ "nopreallocj" ].indexOf( option ) < 0
+ }
+
+ return true
+ }
+
+ for ( var k in o ){
+
+ // Skip logical options and options that aren't valid for this binary version
+ if( ! o.hasOwnProperty( k ) ||
+ k in MongoRunner.logicalOptions ||
+ ! isValidOptionForBinary( k, o[k] ) ) continue
+
+ if ( ( k == "v" || k == "verbose" ) && isNumber( o[k] ) ){
+ var n = o[k]
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10
+ var temp = "-"
+ while ( n-- > 0 ) temp += "v"
+ fullArgs.push( temp )
+ }
+ }
+ else {
+ if( o[k] == undefined || o[k] == null ) continue
+ fullArgs.push( "--" + k )
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] )
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ fullArgs[ 0 ] = binaryName
+ return fullArgs
+}
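+
+// For example (illustrative; argument order follows key insertion order):
+//     MongoRunner.arrOptions("mongod", { port: 30000, nojournal: "", v: 2 })
+//     // yields [ "mongod", "--port", "30000", "--nojournal", "-vv" ]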
+
+MongoRunner.arrToOpts = function( arr ){
+
+ var opts = {}
+ for( var i = 1; i < arr.length; i++ ){
+ if( arr[i].startsWith( "-" ) ){
+ var opt = arr[i].replace( /^-/, "" ).replace( /^-/, "" )
+
+ if( arr.length > i + 1 && ! arr[ i + 1 ].startsWith( "-" ) ){
+ opts[ opt ] = arr[ i + 1 ]
+ i++
+ }
+ else{
+ opts[ opt ] = ""
+ }
+
+ if( opt.replace( /v/g, "" ) == "" ){
+ opts[ "verbose" ] = opt.length
+ }
+ }
+ }
+
+ return opts
+}
+
+MongoRunner.savedOptions = {}
+
+MongoRunner.mongoOptions = function( opts ){
+
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
+
+ // If we're a mongo object
+ if( opts.getDB ){
+ opts = { restart : opts.runId }
+ }
+
+ // Initialize and create a copy of the opts
+ opts = Object.merge( opts || {}, {} )
+
+ if( ! opts.restart ) opts.restart = false
+
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if( opts.restart && opts.restart.getDB ){
+ opts.runId = opts.restart.runId
+ opts.restart = true
+ }
+ // If it's the runId itself
+ else if( isObject( opts.restart ) ){
+ opts.runId = opts.restart
+ opts.restart = true
+ }
+
+ if( isObject( opts.remember ) ){
+ opts.runId = opts.remember
+ opts.remember = true
+ }
+ else if( opts.remember == undefined ){
+ // Remember by default if we're restarting
+ opts.remember = opts.restart
+ }
+
+ // If we passed in restart : <conn> or runId : <conn>
+ if( isObject( opts.runId ) && opts.runId.runId ) opts.runId = opts.runId.runId
+
+ if( opts.restart && opts.remember ) opts = Object.merge( MongoRunner.savedOptions[ opts.runId ], opts )
+
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId()
+
+ // Save the port if required
+ if( ! opts.forgetPort ) opts.port = opts.port || MongoRunner.nextOpenPort()
+
+ var shouldRemember = ( ! opts.restart && ! opts.noRemember ) || ( opts.restart && opts.appendOptions )
+
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ if ( shouldRemember ){
+ MongoRunner.savedOptions[ opts.runId ] = Object.merge( opts, {} )
+ }
+
+ // Default for waitForConnect is true
+ opts.waitForConnect = (waitForConnect == undefined || waitForConnect == null) ?
+ true : waitForConnect;
+
+ if( jsTestOptions().useSSL ) {
+ if (!opts.sslMode) opts.sslMode = "requireSSL";
+ if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
+ if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";
+
+ // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+ // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ opts.port = opts.port || MongoRunner.nextOpenPort()
+ MongoRunner.usedPortMap[ "" + parseInt( opts.port ) ] = true
+
+ opts.pathOpts = Object.merge( opts.pathOpts || {}, { port : "" + opts.port, runId : "" + opts.runId } )
+
+ return opts
+}
+
+/**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournalPrealloc {boolean}
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+MongoRunner.mongodOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ opts.dbpath = MongoRunner.toRealDir( opts.dbpath || "$dataDir/mongod-$port",
+ opts.pathOpts )
+
+ opts.pathOpts = Object.merge( opts.pathOpts, { dbpath : opts.dbpath } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = opts.dbpath + "/mongod.log"
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if ( opts.logFile !== undefined ) {
+ opts.logpath = opts.logFile;
+ }
+
+ if( jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc )
+ opts.nopreallocj = ""
+
+ if( jsTestOptions().noJournal || opts.noJournal )
+ opts.nojournal = ""
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ if( jsTestOptions().useSSL ) {
+ if (!opts.sslMode) opts.sslMode = "requireSSL";
+ if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
+ if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";
+
+ // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+ // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ if( opts.noReplSet ) opts.replSet = null
+ if( opts.arbiter ) opts.oplogSize = 1
+
+ return opts
+}
+
+MongoRunner.mongosOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ // Normalize configdb option to be host string if currently a host
+ if( opts.configdb && opts.configdb.getDB ){
+ opts.configdb = opts.configdb.host
+ }
+
+ opts.pathOpts = Object.merge( opts.pathOpts,
+ { configdb : opts.configdb.replace( /:|,/g, "-" ) } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = MongoRunner.toRealFile( "$dataDir/mongos-$configdb-$port.log",
+ opts.pathOpts )
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if ( opts.logFile !== undefined ){
+ opts.logpath = opts.logFile;
+ }
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ return opts
+}
+
+/**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true
+ * forceLock {boolean}: Deletes the lock file if set to true
+ * dbpath {string}: location of db files
+ * cleanData {boolean}: Removes all files in dbpath if true
+ * startClean {boolean}: same as cleanData
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority)
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongod = function( opts ){
+
+ opts = opts || {}
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongodOptions( opts );
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ if( opts.forceLock ) removeFile( opts.dbpath + "/mongod.lock" )
+ if( ( opts.cleanData || opts.startClean ) || ( ! opts.restart && ! opts.noCleanData ) ){
+ print( "Resetting db path '" + opts.dbpath + "'" )
+ resetDbpath( opts.dbpath )
+ }
+
+ opts = MongoRunner.arrOptions( "mongod", opts )
+ }
+
+ var mongod = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) mongod = {};
+ if (!mongod) return null;
+
+ mongod.commandLine = MongoRunner.arrToOpts( opts )
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port
+ mongod.host = mongod.name
+ mongod.port = parseInt( mongod.commandLine.port )
+ mongod.runId = runId || ObjectId()
+ mongod.savedOptions = MongoRunner.savedOptions[ mongod.runId ];
+ mongod.fullOptions = fullOptions;
+
+ return mongod
+}
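+
+// A typical flow (an illustrative sketch, assuming default test options):
+//     var conn = MongoRunner.runMongod({ useLogFiles: true });
+//     conn.getDB("test").foo.insert({ x: 1 });
+//     MongoRunner.stopMongod(conn.port);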
+
+MongoRunner.runMongos = function( opts ){
+
+ opts = opts || {}
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongosOptions( opts );
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ opts = MongoRunner.arrOptions( "mongos", opts )
+ }
+
+ var mongos = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) mongos = {};
+ if (!mongos) return null;
+
+ mongos.commandLine = MongoRunner.arrToOpts( opts )
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port
+ mongos.host = mongos.name
+ mongos.port = parseInt( mongos.commandLine.port )
+ mongos.runId = runId || ObjectId()
+ mongos.savedOptions = MongoRunner.savedOptions[ mongos.runId ]
+ mongos.fullOptions = fullOptions;
+
+ return mongos
+}
+
+/**
+ * Kills a mongod process.
+ *
+ * @param {number} port the port of the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * }
+ * }
+ *
+ * Note: The auth option is required for an authenticated mongod running on Windows, since
+ * it uses the shutdown command, which requires admin credentials.
+ */
+MongoRunner.stopMongod = function( port, signal, opts ){
+
+ if( ! port ) {
+ print( "Cannot stop mongo process " + port )
+ return
+ }
+
+ signal = signal || 15
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+ var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ var exitCode = stopMongod( parseInt( port ), parseInt( signal ), opts )
+
+ delete MongoRunner.usedPortMap[ "" + parseInt( port ) ]
+
+ return exitCode
+}
+
+MongoRunner.stopMongos = MongoRunner.stopMongod
+
+MongoRunner.isStopped = function( port ){
+
+ if( ! port ) {
+ print( "Cannot detect if process " + port + " is stopped." )
+ return
+ }
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+ var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ return MongoRunner.usedPortMap[ "" + parseInt( port ) ] ? false : true
+}
+
+/**
+ * Starts an instance of the specified mongo tool
+ *
+ * @param {String} binaryName The name of the tool to run
+ * @param {Object} opts options to pass to the tool
+ * {
+ * binVersion {string}: version of tool to run
+ * }
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongoTool = function( binaryName, opts ){
+
+ var opts = opts || {}
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ var argsArray = MongoRunner.arrOptions(binaryName, opts)
+
+ return runMongoProgram.apply(null, argsArray);
+
+}
+
+// Given a test name, figures out a directory for that test to use for dump files and makes sure
+// that directory exists and is empty.
+MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+}
+
+// Start a mongod instance and return a 'Mongo' object connected to it.
+// This function's arguments are passed as command line arguments to mongod.
+// The specified 'dbpath' is cleared if it exists, created if not.
+// var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+startMongodEmpty = function () {
+ var args = createMongoArgs("mongod", arguments);
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+}
+startMongod = function () {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return startMongodEmpty.apply(null, arguments);
+}
+startMongodNoReset = function(){
+ var args = createMongoArgs( "mongod" , arguments );
+ return startMongoProgram.apply( null, args );
+}
+
+startMongos = function(args){
+ return MongoRunner.runMongos(args);
+}
+
+/**
+ * Returns a new argArray with any test-specific arguments added.
+ */
+function appendSetParameterArgs(argArray) {
+ var programName = argArray[0];
+ if (programName.endsWith('mongod') || programName.endsWith('mongos')) {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push.apply(argArray, ['--setParameter', "enableTestCommands=1"]);
+ }
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
+ var hasAuthMechs = false;
+ for (i in argArray) {
+ if (typeof argArray[i] === 'string' &&
+ argArray[i].indexOf('authenticationMechanisms') != -1) {
+ hasAuthMechs = true;
+ break;
+ }
+ }
+ if (!hasAuthMechs) {
+ argArray.push.apply(argArray,
+ ['--setParameter',
+ "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ }
+ }
+ if (jsTest.options().auth) {
+ argArray.push.apply(argArray, ['--setParameter', "enableLocalhostAuthBypass=false"]);
+ }
+
+ if ( jsTestOptions().useSSL ) {
+ if ( argArray.indexOf('--sslMode') < 0 ) {
+ argArray.push.apply(argArray, [ '--sslMode', 'requireSSL', '--sslPEMKeyFile', 'jstests/libs/server.pem', '--sslCAFile', 'jstests/libs/ca.pem', '--sslWeakCertificateValidation' ] );
+ }
+ }
+
+ // mongos only options
+ if (programName.endsWith('mongos')) {
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ var params = jsTest.options().setParametersMongos.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) argArray.push.apply(argArray, ['--setParameter', p])
+ });
+ }
+ }
+ }
+ // mongod only options
+ else if (programName.endsWith('mongod')) {
+ // set storageEngine for mongod
+ if (jsTest.options().storageEngine) {
+ argArray.push.apply(argArray, ['--storageEngine', jsTest.options().storageEngine]);
+ }
+ // apply setParameters for mongod
+ if (jsTest.options().setParameters) {
+ var params = jsTest.options().setParameters.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) argArray.push.apply(argArray, ['--setParameter', p])
+ });
+ }
+ }
+ }
+ }
+ return argArray;
+};
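+
+// For example (illustrative): with enableTestCommands as the only test
+// option set,
+//     appendSetParameterArgs(["mongod", "--port", "30000"])
+// returns the array extended with ["--setParameter", "enableTestCommands=1"].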
+
+/**
+ * Start a mongo process with a particular argument array. If we aren't waiting for connect,
+ * return null.
+ */
+MongoRunner.startWithArgs = function(argArray, waitForConnect) {
+ // TODO: Consolidate into a single codepath for starting mongo processes
+
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = _startMongoProgram.apply(null, argArray);
+
+ var conn = null;
+ if (waitForConnect) {
+ assert.soon( function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch( e ) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended")
+
+ // Break out
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+ }
+
+ return conn;
+}
+
+/**
+ * DEPRECATED
+ *
+ * Start mongod or mongos and return a Mongo() object connected to it.
+ * This function's first argument is the "mongod" or "mongos" program name,
+ * and subsequent arguments to this function are passed as
+ * command line arguments to the program.
+ */
+startMongoProgram = function(){
+ var port = _parsePort.apply( null, arguments );
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = argumentsToArray( arguments );
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply( null, args );
+
+ var m;
+ assert.soon
+ ( function() {
+ try {
+ m = new Mongo( "127.0.0.1:" + port );
+ return true;
+ } catch( e ) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended")
+
+ // Break out
+ m = null;
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000 );
+
+ return m;
+}
+
+runMongoProgram = function() {
+ var args = argumentsToArray( arguments );
+ var progName = args[0];
+
+ if ( jsTestOptions().auth ) {
+ args = args.slice(1);
+ args.unshift( progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
+ '--authenticationDatabase=admin'
+ );
+ }
+
+ if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
+ args.push("--dialTimeout", "30");
+ }
+
+ if ( jsTestOptions().useSSL ) {
+ args.push("--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHostnames");
+ }
+
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _runMongoProgram.apply( null, args );
+}
+
+// Start a mongo program instance. This function's first argument is the
+// program name, and subsequent arguments to this function are passed as
+// command line arguments to the program. Returns pid of the spawned program.
+startMongoProgramNoConnect = function() {
+ var args = argumentsToArray( arguments );
+ var progName = args[0];
+
+ if ( jsTestOptions().auth ) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
+ '--authenticationDatabase=admin');
+ }
+
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _startMongoProgram.apply( null, args );
+}
+
+myPort = function() {
+ var m = db.getMongo();
+ if ( m.host.match( /:/ ) )
+ return m.host.match( /:(.*)/ )[ 1 ];
+ else
+ return 27017;
+}
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers_misc.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers_misc.js
new file mode 100644
index 00000000000..4f6d3f9b9ef
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/servers_misc.js
@@ -0,0 +1,357 @@
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on, use allocatePorts(num) to requisition
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options; set no_bind to skip binding bind_ip to 127.0.0.1
+ * (necessary for replica set testing)
+ */
+MongodRunner = function( port, dbpath, peer, arbiter, extraArgs, options ) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer;
+ this.arbiter_ = arbiter;
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ */
+MongodRunner.prototype.start = function( reuseData ) {
+ var args = [];
+ if ( reuseData ) {
+ args.push( "mongod" );
+ }
+ args.push( "--port" );
+ args.push( this.port_ );
+ args.push( "--dbpath" );
+ args.push( this.dbpath_ );
+ args.push( "--nohttpinterface" );
+ args.push( "--noprealloc" );
+ args.push( "--smallfiles" );
+ if (!this.options_.no_bind) {
+ args.push( "--bind_ip" );
+ args.push( "127.0.0.1" );
+ }
+ if ( this.extraArgs_ ) {
+ args = args.concat( this.extraArgs_ );
+ }
+ removeFile( this.dbpath_ + "/mongod.lock" );
+ if ( reuseData ) {
+ return startMongoProgram.apply( null, args );
+ } else {
+ return startMongod.apply( null, args );
+ }
+}
+
+MongodRunner.prototype.port = function() { return this.port_; }
+
+MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }
+
+ToolTest = function( name, extraOptions ){
+ this.name = name;
+ this.options = extraOptions;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = MongoRunner.dataPath + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ this.useSSL = jsTestOptions().useSSL
+ resetDbpath( this.dbpath );
+ resetDbpath( this.ext );
+}
+
+ToolTest.prototype.startDB = function( coll ){
+ assert( ! this.m , "db already running" );
+
+ var options = {port : this.port,
+ dbpath : this.dbpath,
+ nohttpinterface : "",
+ noprealloc : "",
+ smallfiles : "",
+ bind_ip : "127.0.0.1"};
+
+ Object.extend(options, this.options);
+
+ if ( this.useSSL ) {
+ Object.extend(options, { sslMode: "requireSSL", sslPEMKeyFile: "jstests/libs/server.pem", sslCAFile: "jstests/libs/ca.pem", sslWeakCertificateValidation: "" } );
+ }
+
+ this.m = startMongoProgram.apply(null, MongoRunner.arrOptions("mongod", options));
+ this.db = this.m.getDB( this.baseName );
+ if ( coll )
+ return this.db.getCollection( coll );
+ return this.db;
+}
+
+ToolTest.prototype.stop = function(){
+ if ( ! this.m )
+ return;
+ stopMongod( this.port );
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+}
+
+ToolTest.prototype.runTool = function(){
+ var a = [ "mongo" + arguments[0] ];
+
+ var hasdbpath = false;
+
+ for ( var i=1; i<arguments.length; i++ ){
+ a.push( arguments[i] );
+ if ( arguments[i] == "--dbpath" )
+ hasdbpath = true;
+ }
+
+ if ( this.useSSL ) {
+ a = a.concat(["--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHostnames"]);
+ }
+
+ if ( ! hasdbpath ){
+ a.push( "--host" );
+ a.push( "127.0.0.1:" + this.port );
+ }
+
+ return runMongoProgram.apply( null , a );
+}
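+
+// A minimal dump sketch (illustrative only; "example" is an arbitrary test
+// name, and runTool("dump", ...) invokes the mongodump binary):
+//     var tt = new ToolTest("example");
+//     var coll = tt.startDB("foo");
+//     coll.insert({ x: 1 });
+//     tt.runTool("dump", "--out", tt.ext);
+//     tt.stop();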
+
+
+ReplTest = function( name, ports ){
+ this.name = name;
+ this.ports = ports || allocatePorts( 2 );
+}
+
+ReplTest.prototype.getPort = function( master ){
+ if ( master )
+ return this.ports[ 0 ];
+ return this.ports[ 1 ]
+}
+
+ReplTest.prototype.getPath = function( master ){
+ var p = MongoRunner.dataPath + this.name + "-";
+ if ( master )
+ p += "master";
+ else
+ p += "slave"
+ return p;
+}
+
+ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norepl ){
+
+ if ( ! extra )
+ extra = {};
+
+ if ( ! extra.oplogSize )
+ extra.oplogSize = "40";
+
+ var a = []
+ if ( putBinaryFirst )
+ a.push( "mongod" )
+ a.push( "--nohttpinterface", "--noprealloc", "--bind_ip" , "127.0.0.1" , "--smallfiles" );
+
+ a.push( "--port" );
+ a.push( this.getPort( master ) );
+
+ a.push( "--dbpath" );
+ a.push( this.getPath( master ) );
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+ if( jsTestOptions().keyFile ) {
+ a.push( "--keyFile" )
+ a.push( jsTestOptions().keyFile )
+ }
+
+ if( jsTestOptions().useSSL ) {
+ if (!a.contains("--sslMode")) {
+ a.push( "--sslMode" )
+ a.push( "requireSSL" )
+ }
+ if (!a.contains("--sslPEMKeyFile")) {
+ a.push( "--sslPEMKeyFile" )
+ a.push( "jstests/libs/server.pem" )
+ }
+ if (!a.contains("--sslCAFile")) {
+ a.push( "--sslCAFile" )
+ a.push( "jstests/libs/ca.pem" )
+ }
+ a.push( "--sslWeakCertificateValidation" )
+ }
+ if( jsTestOptions().useX509 && !a.contains("--clusterAuthMode")) {
+ a.push( "--clusterAuthMode" )
+ a.push( "x509" )
+ }
+
+ if ( !norepl ) {
+ if ( master ){
+ a.push( "--master" );
+ }
+ else {
+ a.push( "--slave" );
+ a.push( "--source" );
+ a.push( "127.0.0.1:" + this.ports[0] );
+ }
+ }
+
+ for ( var k in extra ){
+ var v = extra[k];
+ if( k in MongoRunner.logicalOptions ) continue
+ a.push( "--" + k );
+ if ( v != null )
+ a.push( v );
+ }
+
+ return a;
+}
+
+ReplTest.prototype.start = function( master , options , restart, norepl ){
+ var lockFile = this.getPath( master ) + "/mongod.lock";
+ removeFile( lockFile );
+ var o = this.getOptions( master , options , restart, norepl );
+
+ if (restart) {
+ return startMongoProgram.apply(null, o);
+ } else {
+ var conn = startMongod.apply(null, o);
+ if (jsTestOptions().keyFile || jsTestOptions().auth || jsTestOptions().useX509) {
+ jsTest.authenticate(conn);
+ }
+ return conn;
+ }
+}
+
+ReplTest.prototype.stop = function( master , signal ){
+ if ( arguments.length == 0 ){
+ this.stop( true );
+ this.stop( false );
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return stopMongod( this.getPort( master ) , signal || 15 );
+}
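+
+// A minimal master/slave sketch (illustrative only):
+//     var rt = new ReplTest("example");
+//     var m = rt.start(true);   // master
+//     var s = rt.start(false);  // slave, sourcing from the master's port
+//     // ... run assertions against m and s ...
+//     rt.stop();                // with no arguments, stops both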
+
+allocatePorts = function( n , startPort ) {
+ var ret = [];
+ var start = startPort || 31000;
+ for( var i = start; i < start + n; ++i )
+ ret.push( i );
+ return ret;
+}
+
+
+SyncCCTest = function( testName , extraMongodOptions ){
+ this._testName = testName;
+ this._connections = [];
+
+ for ( var i=0; i<3; i++ ){
+ this._connections.push( startMongodTest( 30000 + i , testName + i , false, extraMongodOptions ) );
+ }
+
+ this.url = this._connections.map( function(z){ return z.name; } ).join( "," );
+ this.conn = new Mongo( this.url );
+}
+
+SyncCCTest.prototype.stop = function(){
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+}
+
+SyncCCTest.prototype.checkHashes = function( dbname , msg ){
+ var hashes = this._connections.map(
+ function(z){
+ return z.getDB( dbname ).runCommand( "dbhash" );
+ }
+ );
+
+ for ( var i=1; i<hashes.length; i++ ){
+ assert.eq( hashes[0].md5 , hashes[i].md5 , "checkHash on " + dbname + " " + msg + "\n" + tojson( hashes ) )
+ }
+}
+
+SyncCCTest.prototype.tempKill = function( num ){
+ num = num || 0;
+ stopMongod( 30000 + num );
+}
+
+SyncCCTest.prototype.tempStart = function( num ){
+ num = num || 0;
+ this._connections[num] = startMongodTest( 30000 + num , this._testName + num , true );
+}
+
+
+function startParallelShell( jsCode, port, noConnect ){
+ var x;
+
+ var args = ["mongo"];
+
+ // Convert function into call-string
+ if (typeof(jsCode) == "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ }
+ else if(typeof(jsCode) == "string") {}
+ // do nothing
+ else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
+ if (noConnect) {
+ args.push("--nodb");
+ } else if (typeof(db) == "object") {
+ jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
+ }
+
+ if (TestData) {
+ jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ if (typeof db == "object") {
+ var hostAndPort = db.getMongo().host.split(':');
+ var host = hostAndPort[0];
+ args.push("--host", host);
+ if (!port && hostAndPort.length >= 2) {
+ port = hostAndPort[1];
+ }
+ }
+ if (port) {
+ args.push("--port", port);
+ }
+
+ if( jsTestOptions().useSSL ) {
+ args.push( "--ssl" )
+ args.push( "--sslPEMKeyFile" )
+ args.push( "jstests/libs/client.pem" )
+ args.push( "--sslCAFile" )
+ args.push( "jstests/libs/ca.pem" )
+ }
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function(){
+ waitProgram( x );
+ };
+}
+
+var testingReplication = false;
+
+function skipIfTestingReplication(){
+ if (testingReplication) {
+ print("skipIfTestingReplication skipping");
+ quit(0);
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..f5f89643f16
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed successfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
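+
+// A minimal usage sketch (illustrative only; the name only affects the
+// dbpath and log output):
+//     var swm = new SlowWeeklyMongod("example_slow");
+//     var testDB = swm.getDB("test");
+//     // ... long-running assertions ...
+//     swm.stop();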
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/smoke.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..7dddf222386
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/smoke.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIBCDANBgkqhkiG9w0BAQUFADBrMQ4wDAYDVQQDEwVzbW9r
+ZTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1O
+ZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwHhcN
+MTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBrMQ4wDAYDVQQDEwVzbW9rZTEP
+MA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcg
+WW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb4fOWDomCPyYesh42pQ/bEHdK7r73
+06x1hdku9i+nytCSxhhuITGC1FA4ZIbYdQC/jgfzC0D+SDFKCCyNZA/2Pxam9y3F
+QHrueNtD9bw/OB98D6hC2fCow5OxUqWDkee2hQRTwLKDzec+H72AkwURh8oTfJsl
+LL/1YITZs9kfs59r8HG2YAT7QBbg3xBmK0wZvL4V/FY/OeeR92pIgjUU/6xm/1LU
+bhNHl5JTrXQxPpmvDb1ysiI0mMLeUz7UI+Pe/9mn91dHwgkprWyFi6VnV3/aW7DC
+nW/DklOPD8vMWu2A6iYU0fZbcj4vGM607vst5QLDMoD5Y2ilrKLiTRa5AgMBAAGj
+EDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJc64d76+eyNGX6C
+5r4IdFF3zJjkLs/NcSMReUTEv4zAdJCn7c1FNRkQBS3Ky2CeSGmiyYOhWZ7usv7x
+EvprmHouWsrQXV+o5EIW366e5wzg0c5KWO3oBIRjx4hDkRSQSjJjy5NFrc8fAW9x
+eeaHFWdqk3CHvqBhd32QYEs4+7v8hBYM3PBkj8qghXta4ZZS89cTMSjhu5s4Opje
+qUzGzoHat2VBdYzIpVOorYMFXObwCeQkCAXO5epuGZ0QhML66hc7FuOsW75kI9aW
+QXVoM/z2Gb1wbBYnwHOXtClK783S3RdV0uJun/pVj+VeHb6fyIQRmC5d0eJ0C8mY
+X+acnvA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA2+Hzlg6Jgj8mHrIeNqUP2xB3Su6+99OsdYXZLvYvp8rQksYY
+biExgtRQOGSG2HUAv44H8wtA/kgxSggsjWQP9j8WpvctxUB67njbQ/W8PzgffA+o
+QtnwqMOTsVKlg5HntoUEU8Cyg83nPh+9gJMFEYfKE3ybJSy/9WCE2bPZH7Ofa/Bx
+tmAE+0AW4N8QZitMGby+FfxWPznnkfdqSII1FP+sZv9S1G4TR5eSU610MT6Zrw29
+crIiNJjC3lM+1CPj3v/Zp/dXR8IJKa1shYulZ1d/2luwwp1vw5JTjw/LzFrtgOom
+FNH2W3I+LxjOtO77LeUCwzKA+WNopayi4k0WuQIDAQABAoIBAQDRFgAaDcLGfqQS
+Bk/iqHz2U6cMMxCW+sqAioGmPWW9iYdiOkra1meNP7T0mur7A+9tN3LpsybfZeiw
+vCsZXDAteXph1KPKcPE0uOnPqumRuB2ATCc1Qqas5CUaNju7a8/J6Jzfw1o9KVud
+4HLDw4nLTLNkalXhOLdkbp6FoZZypAgc8OnSdw7z9Kri6VndkddX3fWv4t203XwT
+AvBxvy4Qfblz6VKYRnjj2CPvo/kD+ncFEg+S6u8/LkghTX7CYeMHdTC0P9jOcEK2
+PMm3kS3sX7VkypsAirYK5QtBWxur+mINxfOBDtRlA2RaJQnikRiGb14bMkLx8Liy
+JNjEHSLdAoGBAP9+KpjniozZIbrcS79wdRrW+ARyDp1Plzyd4nQxfWmQ//nsnK5T
+EYCFXWTR/ldkAoHpD+bGGU02p1+1u4vmWqw/x+Qy56Gh/eylhe0RvYEjkVLyreuc
+bXu0BFlKVgRlBq1ZyXnr2lz3bAIZxvZs13lZn6qVPMt7w2/JTCal9jw7AoGBANxR
+sGik9mq/678nzLiNlf/LcwIz7siuyISoWDOaVEVva0uorqctVqL95w0f+3FXqBO/
+5BiJRFo5D8SfzRjkNkJ7V+rm+7/CjtsjEw2Ue+ZJYPlm+Wr545GYmhU9QH9NLZIN
+JBwTVWjLgdsyQyi0Gc+xMraBwEwoyS8cO17uHO2bAoGBANRmO91/6BPt0ve4epR9
+Vi1o9yki9PlcmHtBOmikWAFyFQvd4+eckVlKBflyBkL6locPjTOqDpC9VengeDj2
+2PyHzZLtqtkZhbK9bJhIfkWknwTZUTMliXMkldTxUo82uZVVpoRgSdmtq7IXYeut
+UnjExFMY3EDB9BizvUYIBKvPAoGAViQ6bS/SiPpxGlRdXus88r6BQSM9AYoVLIkF
+s2dr+5oMwZA6eXLopOHRLPiMP0yekto8PLuu1ffpil9QuaLA9E11moqlc9yGLngQ
+QwcDSo72M41nh8Qcjhi0ZgmE5kEuyCQLMk783fRz2VhVmdyRGvuVcHZa0WxA/QJ0
+1DEVbnECgYEA3i2PGHUvU2TIFNvubw3qdH5y7FXafF+O0ulQ8e6r/CbVAG14Z6xP
+RHLc7/JIYK9CG1PWCbkjiHZ4MsKFuRWFrUMrwSj8M3euCaEIxa/Co60qQ/CnZiZ6
+geleTtUcTZ2T0pqGLnrHwlzhLpCkPJPyjcfQjjEZRwd0bVFX6b3C/rw=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/test_background_ops.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..b3f6f593947
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities for running background operations while other test operations are in progress
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
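+
+/**
+ * Example usage (illustrative sketch, not part of the original library;
+ * assumes `mongo` is a live connection to the cluster's config database):
+ *
+ * <pre>
+ * var lock = waitForLock( mongo, "myCriticalSection" )
+ * try {
+ *     // ... operations that must not interleave with other ops ...
+ * }
+ * finally {
+ *     lock.unlock()
+ * }
+ * </pre>
+ */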
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
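+
+/**
+ * Illustrative sketch (hypothetical op name "myOp" and helper doSomeWork(),
+ * not part of the original file) of how the helpers above coordinate a
+ * background op with the foreground test:
+ *
+ * <pre>
+ * // In the background op:
+ * while( !isFinished( mongo, "myOp" ) ) doSomeWork()
+ * setResult( mongo, "myOp", { docsInserted : 100 }, null )
+ *
+ * // In the foreground test:
+ * setFinished( mongo, "myOp", true )
+ * printjson( getResult( mongo, "myOp" ) )
+ * </pre>
+ */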
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw Error("Error in parallel ops " + procName + " : "
+ + tojson( result.err ) )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
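+
+/**
+ * Example usage (illustrative sketch; the operation function must be
+ * self-contained, since it is serialized with tojson() and re-evaluated
+ * inside the parallel shell):
+ *
+ * <pre>
+ * var insertOp = function( n ){
+ *     for( var i = 0; i < n; i++ ) db.foo.insert({ i : i })
+ *     return n
+ * }
+ * var join = startParallelOps( db.getMongo(), insertOp, [ 1000 ] )
+ * // ... run foreground test operations here ...
+ * var inserted = join() // signals the op to finish and returns its result
+ * </pre>
+ */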
+
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * max )
+ }
+
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
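+
+/**
+ * moveOps and splitOps are intended to run as background ops via
+ * startParallelOps, e.g. (illustrative sketch; assumes a ShardingTest `st`
+ * with a sharded collection "test.foo"):
+ *
+ * <pre>
+ * var joinMove = startParallelOps( st.s, moveOps, [ "test.foo" ] )
+ * var joinSplit = startParallelOps( st.s, splitOps, [ "test.foo" ] )
+ * // ... exercise the cluster here ...
+ * joinMove(); joinSplit()
+ * </pre>
+ */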
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/testconfig b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/testconfig
new file mode 100644
index 00000000000..4b09f37ad13
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/testconfig
@@ -0,0 +1,6 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
+help = false
+sysinfo = false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On an error inserting documents, traces back through the shards' oplogs and shows where the document was dropped
+//
+
+function traceMissingDoc( coll, doc, mongos ) {
+
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+} \ No newline at end of file
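+
+/**
+ * Example usage (illustrative sketch; assumes a mongos connection `mongos`
+ * and a sharded collection "test.foo"):
+ *
+ * <pre>
+ * var ops = traceMissingDoc( "test.foo", { _id : 5, a : 5 }, mongos )
+ * // ops now holds the doc's insert/update/delete oplog entries from all
+ * // shards, sorted by timestamp
+ * </pre>
+ */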
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/replsets/rslib.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..6a16db232e4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
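+
+/**
+ * Example (illustrative sketch): block until a node reports itself primary.
+ *
+ * <pre>
+ * wait( function(){ return conn.getDB( "admin" ).isMaster().ismaster },
+ *       "node never became primary" )
+ * </pre>
+ */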
+
+/**
+ * Use this to do something once every n iterations (default 4).
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
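+
+/**
+ * Example (illustrative sketch; assumes an initiated ReplSetTest `replTest`):
+ *
+ * <pre>
+ * replTest.restart( 0 )
+ * reconnect( replTest.nodes[0] )
+ * </pre>
+ */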
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
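+
+/**
+ * Example (illustrative sketch): check whether two members have identical
+ * latest optimes.
+ *
+ * <pre>
+ * var op0 = getLatestOp( replTest.nodes[0] )
+ * var op1 = getLatestOp( replTest.nodes[1] )
+ * print( "in sync: " + ( op0 != null && op1 != null &&
+ *                        friendlyEqual( op0.ts, op1.ts ) ) )
+ * </pre>
+ */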
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
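+
+/**
+ * Example (illustrative sketch): wait up to two minutes for every member to
+ * reach PRIMARY, SECONDARY, or ARBITER state.
+ *
+ * <pre>
+ * waitForAllMembers( replTest.getPrimary().getDB( "admin" ), 2 * 60 * 1000 )
+ * </pre>
+ */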
+
+var reconfig = function(rs, config) {
+ var admin = rs.getPrimary().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
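+
+/**
+ * Example usage (illustrative sketch; bumps the config version manually,
+ * since reconfig() does not do so itself):
+ *
+ * <pre>
+ * var config = replTest.getPrimary().getDB( "local" ).system.replset.findOne()
+ * config.version++
+ * config.members[0].priority = 2
+ * reconfig( replTest, config )
+ * </pre>
+ */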
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csv1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csv1.js
new file mode 100644
index 00000000000..e95d8aa8b41
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csv1.js
@@ -0,0 +1,43 @@
+// csv1.js
+
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.docEq( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"}, a[1], "csv parse 1" );
+assert.docEq( base, a[0], "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.docEq( base, x, "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: exporting and importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+ // For fields which contain arrays or objects, they have been
+ // exported as JSON - parse the JSON in the output and verify
+ // that it matches the original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport2.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..2dc87b3c641
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvexport2.js
@@ -0,0 +1,32 @@
+// csvexport2.js
+
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// so this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvimport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..87320afec87
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/csvimport1.js
@@ -0,0 +1,41 @@
+// csvimport1.js
+
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.docEq( base[i], a[i], "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.docEq( base[i], x[i], "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/a.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpauth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..af6706d107d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpauth.js
@@ -0,0 +1,29 @@
+// dumpauth.js
+// test mongodump with authentication
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+t = db[ baseName ];
+t.drop();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+x = runMongoProgram( "mongodump",
+ "--db", "admin",
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "-vvv",
+ "--collection", baseName+".testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpfilename1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..38b430896bf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpfilename1.js
@@ -0,0 +1,13 @@
+// dumpfilename1.js
+
+// Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+assert.writeOK(c.getCollection("df/").insert({ a: 3 }));
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..a0f6f844d9e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore1.js
@@ -0,0 +1,32 @@
+// dumprestore1.js
+
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
+
+// Ensure that --db and --collection are provided when filename is "-" (stdin).
+ret = t.runTool( "restore" , "--collection" , "coll", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
+ret = t.runTool( "restore" , "--db" , "db", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore10.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..5a9426dd7c4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore10.js
@@ -0,0 +1,64 @@
+// simple test to ensure write concern functions as expected
+
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..45067c7ff06
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore3.js
@@ -0,0 +1,61 @@
+// dumprestore3.js
+
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, 1, "mongorestore should exit w/ 1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore4.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..337d9c34265
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore4.js
@@ -0,0 +1,43 @@
+// dumprestore4.js -- see SERVER-2186
+
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , c.getIndexes().length , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , c.getIndexes().length , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.dumprestore4.getIndexes().length , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.dumprestore4.getIndexes().length , "after restore 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore6.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..1ea55e40f5c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore6.js
@@ -0,0 +1,54 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+if(db.serverStatus().storageEngine.name == "mmapv1") {
+ assert.eq( 0 , c.count() , "setup1" );
+
+ t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+ assert.soon( "c.findOne()" , "no data after sleep" );
+ assert.eq( 1 , c.count() , "after restore" );
+
+ var indexes = c.getIndexes();
+ assert.eq( 2, indexes.length, "there aren't the correct number of indexes" );
+ var aIndex = null;
+ indexes.forEach(function(index) {
+ if (index.name === "a_1") {
+ aIndex = index;
+ }
+ });
+ assert.neq(null, aIndex, "index doesn't exist" );
+ assert.eq( 1 , aIndex.v, "index version wasn't updated");
+
+ assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+ db.dropDatabase()
+ assert.eq( 0 , c.count() , "after drop" );
+
+ t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+ assert.soon( "c.findOne()" , "no data after sleep2" );
+ assert.eq( 1 , c.count() , "after restore2" );
+
+ indexes = c.getIndexes();
+ assert.eq( 2, indexes.length, "there aren't the correct number of indexes" );
+ aIndex = null;
+ indexes.forEach(function(index) {
+ if (index.name === "a_1") {
+ aIndex = index;
+ }
+ });
+ assert.neq(null, aIndex, "index doesn't exist" );
+ assert.eq( 0 , aIndex.v, "index version wasn't maintained")
+
+ assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+}else{
+ print("skipping index version test on non-mmapv1 storage engine")
+}
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore7.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..a71725f434b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore7.js
@@ -0,0 +1,66 @@
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = 9;
+x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore8.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..edc1a874343
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore8.js
@@ -0,0 +1,107 @@
+// dumprestore8.js
+
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000, max:10});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped" );
+assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore");
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped2" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore 2");
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.baz.getIndexes().length , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore9.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..cef9a623cf1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, { chunksize : 1, enableBalancer : 1 } );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+} \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..bfd6f4fa579
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,114 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that collection options are not restored when
+// running mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+// We turn this off to prevent the server from touching the 'options' field in system.namespaces.
+// This is important because we check exact values of the 'options' field in this test.
+db.adminCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var defaultFlags = {}
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..4bda54a5bdc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,117 @@
+// dumprestore_auth.js
+
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+var dbName = c.getDB().toString();
+print("DB is ",dbName);
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+// Add user defined roles & users with those roles
+var testUserAdmin = c.getDB().getSiblingDB(dbName);
+var backupActions = ["find","listCollections", "listIndexes"];
+testUserAdmin.createRole({role: "backupFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:backupActions},
+ {resource: {db: dbName, collection: "" },
+ actions: backupActions}],
+ roles: []});
+testUserAdmin.createUser({user: 'backupFoo', pwd: 'password', roles: ['backupFoo']});
+
+var restoreActions = ["collMod", "createCollection","createIndex","dropCollection","insert","listCollections","listIndexes"];
+var restoreActionsFind = restoreActions;
+restoreActionsFind.push("find");
+testUserAdmin.createRole({role: "restoreChester",
+ privileges: [{resource: {db: dbName, collection: "chester"}, actions: restoreActions},
+ {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
+ ],
+ roles: []});
+testUserAdmin.createRole({role: "restoreFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:restoreActions},
+ {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
+ ],
+ roles: []});
+testUserAdmin.createUser({user: 'restoreChester', pwd: 'password', roles: ['restoreChester']});
+testUserAdmin.createUser({user: 'restoreFoo', pwd: 'password', roles: ['restoreFoo']});
+
+var sysUsers = adminDB.system.users.count();
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+
+var collections = c.getDB().getCollectionInfos();
+var fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 1");
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore should fail without user & pass
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern" ,"0");
+assert.eq(0 , c.count() , "after restore without auth");
+
+// Restore should pass with authorized user
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+
+collections = c.getDB().getCollectionInfos();
+fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 2");
+
+assert.eq(sysUsers, adminDB.system.users.count());
+
+// Dump & restore DB/colection with user defined roles
+t.runTool("dump" , "--out" , t.ext, "--username", "backupFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore with wrong user
+t.runTool("restore" , "--username", "restoreChester", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.eq(0 , c.count() , "after restore with wrong user");
+
+// Restore with proper user
+t.runTool("restore" , "--username", "restoreFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 3");
+assert.eq(22 , c.findOne().a , "after restore 3");
+
+collections = c.getDB().getCollectionInfos();
+fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 3");
+
+assert.eq(sysUsers, adminDB.system.users.count());
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth2.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..0392d1be3db
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,98 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.roles.getIndexes().length, "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop(); \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..f65bed7abff
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,200 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
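+
+// Illustrative note (an assumption about how MongoRunner.runMongoTool handles
+// its options, not something this test asserts): the options object is
+// flattened into CLI flags, so a call such as
+//   runTool("mongodump", mongod, {out: "/tmp/dump", db: "foo"});
+// corresponds roughly to
+//   mongodump --host <mongod.host> --out /tmp/dump --db foo
+// and an empty-string value (e.g. {dumpDbUsersAndRoles: ""}) becomes a bare flag.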
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod); \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_excludecollections.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_excludecollections.js
new file mode 100644
index 00000000000..dcfab742053
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumprestore_excludecollections.js
@@ -0,0 +1,112 @@
+// Tests for mongodump options for excluding collections
+
+
+var testBaseName = "jstests_tool_dumprestore_excludecollections";
+
+var dumpDir = MongoRunner.dataPath + testBaseName + "_dump_external/";
+
+var mongodSource = MongoRunner.runMongod();
+var sourceDB = mongodSource.getDB(testBaseName);
+var mongodDest = MongoRunner.runMongod();
+var destDB = mongodDest.getDB(testBaseName);
+
+jsTest.log("Inserting documents into source mongod");
+sourceDB.test.insert({x:1});
+sourceDB.test2.insert({x:2});
+sourceDB.test3.insert({x:3});
+sourceDB.foo.insert({f:1});
+sourceDB.foo2.insert({f:2});
+
+jsTest.log("Testing incompabible option combinations");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection but no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and --collection");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix but " +
+ "no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix and " +
+ "--collection");
+
+jsTest.log("Testing proper behavior of collection exclusion");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test2.findOne().x, 2, "Wrong value in document");
+assert.eq(destDB.test3.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test3.findOne().x, 3, "Wrong value in document");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+// The --excludeCollection and --excludeCollectionsWithPrefix options can be specified multiple
+// times, but that is not tested here because right now MongoRunners can only be configured using
+// javascript objects which do not allow duplicate keys. See SERVER-14220.
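+// A hedged sketch of how repeated exclusion flags could be exercised instead,
+// since runMongoProgram takes a raw argument list rather than an options object:
+//   var ret2 = runMongoProgram("mongodump", "--out", dumpDir, "--db", testBaseName,
+//                              "--excludeCollection", "foo",
+//                              "--excludeCollection", "foo2",
+//                              "--host", mongodSource.host);
+//   assert.eq(0, ret2, "mongodump should accept repeated --excludeCollection");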
+
+MongoRunner.stopMongod(mongodDest.port);
+MongoRunner.stopMongod(mongodSource.port);
+
+print(testBaseName + " success!");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpsecondary.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..92cd6b9fff1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/dumpsecondary.js
@@ -0,0 +1,39 @@
+
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..5e206d8c40b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport1.js
@@ -0,0 +1,67 @@
+// exportimport1.js
+
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined round-trips as null rather than { "$undefined" : true }; this is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined round-trips as null rather than { "$undefined" : true }; this is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport3.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..4f0fdd46609
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport3.js
@@ -0,0 +1,28 @@
+// exportimport3.js
+
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport4.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport4.js
@@ -0,0 +1,57 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport5.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport5.js
@@ -0,0 +1,82 @@
+// exportimport5.js
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport6.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a6406dfa880
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport6.js
@@ -0,0 +1,27 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1})
+c.save({a:1, b:2})
+c.save({a:2, b:3})
+c.save({a:2, b:3})
+c.save({a:3, b:4})
+c.save({a:3, b:5})
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+
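+// Under sort {a:1, b:-1} the six documents order as (1,2),(1,1),(2,3),(2,3),
+// (3,5),(3,4); skip 4 and limit 1 therefore export only {a:3, b:5}, which the
+// assertions below verify after the import.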
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, printjson(c.findOne()));
+
+t.stop();
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_bigarray.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..e8bd4a468b4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,59 @@
+// Test importing a collection represented as a single-line JSON array larger than the maximum BSON document size
+
+
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
+
+print('Size of one document: ' + docSize)
+print('Number of documents to exceed maximum BSON size: ' + numDocs)
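+// Rough arithmetic: the 1024-character string plus BSON field and _id overhead
+// puts docSize on the order of 1.1KB, so numDocs is roughly
+// 20*1024*1024 / 1100, i.e. around 19,000 documents, comfortably past the 16MB
+// single-document limit once exported as one JSON array.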
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+var bulk = src.initializeUnorderedBulkOp();
+for (i = 0; i < numDocs; ++i) {
+ bulk.insert({ x: bigString });
+}
+assert.writeOK(bulk.execute());
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_date.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..9dc6c275a96
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_date.js
@@ -0,0 +1,50 @@
+
+var tt = new ToolTest('exportimport_date_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Insert a date that we can format
+var formatable = ISODate("1970-01-02T05:00:00Z");
+assert.eq(formatable.valueOf(), 104400000);
+src.insert({ "_id" : formatable });
+
+// Insert a date that we cannot format as an ISODate string
+var nonformatable = ISODate("3001-01-01T00:00:00Z");
+assert.eq(nonformatable.valueOf(), 32535216000000);
+src.insert({ "_id" : nonformatable });
+
+// Verify number of documents inserted
+assert.eq(2, src.find().itcount());
+
+data = 'data/exportimport_date_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_minkey_maxkey.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_minkey_maxkey.js
new file mode 100644
index 00000000000..a4705dc3ceb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/exportimport_minkey_maxkey.js
@@ -0,0 +1,38 @@
+
+var tt = new ToolTest('exportimport_minkey_maxkey_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+src.insert({ "_id" : MaxKey });
+src.insert({ "_id" : MinKey });
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('export', '--out' , tt.extFile, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('import', '--file', tt.extFile, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/files1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/files1.js
new file mode 100644
index 00000000000..3db783df19f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/files1.js
@@ -0,0 +1,28 @@
+// files1.js
+
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/restorewithauth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..d17769cf396
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/restorewithauth.js
@@ -0,0 +1,117 @@
+/* SERVER-4972
+ * Test that mongorestore against a server running with --auth does not restore
+ * collections without credentials (previously possible for collections with no
+ * secondary index).
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add an admin user so that authentication is enforced.
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+var collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection doesn't exist");
+
+//make sure it has no index except _id
+assert.eq(foo.bar.getIndexes().length, 1);
+assert.eq(foo.baz.getIndexes().length, 1);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.bar.getIndexes().length, 2);
+assert.eq(foo.baz.getIndexes().length, 1);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+collNames = foo.getCollectionNames();
+assert.eq(-1, collNames.indexOf("bar"), "bar collection already exists");
+assert.eq(-1, collNames.indexOf("baz"), "baz collection already exists");
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+collNames = foo.getCollectionNames();
+assert.eq(-1, collNames.indexOf("bar"), "bar collection was restored");
+assert.eq(-1, collNames.indexOf("baz"), "baz collection was restored");
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection was not restored");
+assert.neq(-1, collNames.indexOf("baz"), "baz collection was not restored");
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection was not restored");
+assert.neq(-1, collNames.indexOf("baz"), "baz collection was not restored");
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.bar.getIndexes().length + foo.baz.getIndexes().length, 3); // _id on bar, _id on baz, x on bar
+
+stopMongod( port );
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/stat1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/stat1.js
new file mode 100644
index 00000000000..3855d6c13c6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/stat1.js
@@ -0,0 +1,18 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 1, "mongostat should exit with -1 with eliot:wrong");
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
+// mongo tool tests, very basic to start with
+
+
+baseName = "jstests_tool_tool1";
+dbPath = MongoRunner.dataPath + baseName + "/";
+externalPath = MongoRunner.dataPath + baseName + "_external/";
+externalBaseName = "export.json";
+externalFile = externalPath + externalBaseName;
+
+function fileSize(){
+ var l = listFiles( externalPath );
+ for ( var i=0; i<l.length; i++ ){
+ if ( l[i].baseName == externalBaseName )
+ return l[i].size;
+ }
+ return -1;
+}
+
+
+port = allocatePorts( 1 )[ 0 ];
+resetDbpath( externalPath );
+
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
+c = m.getDB( baseName ).getCollection( baseName );
+c.save( { a: 1 } );
+assert( c.findOne() );
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
+c.drop();
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
+assert( c.findOne() , "mongodump then restore has no data" );
+assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
+
+resetDbpath( externalPath );
+
+assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
+assert.lt( 10 , fileSize() , "file size changed" );
+
+c.drop();
+runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
+assert.soon( "c.findOne()" , "mongo import json A" );
+assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool_replset.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..5e8aac672d6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tool_replset.js
@@ -0,0 +1,69 @@
+/*
+ * Test to ensure that (dump/restore/export/import) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
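+// The connection string above has the form "<setName>/<host1>,<host2>"; the
+// tools use it to discover the set members and direct operations at the
+// primary.
+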
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tsv1.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..677bec2af9c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/legacy30/jstests/tool/tsv1.js
@@ -0,0 +1,33 @@
+// tsv1.js
+
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.docEq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.docEq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.docEq( base , x , "tsv parse 2" )
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/.gitignore b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/.gitignore
new file mode 100644
index 00000000000..d045950fbf6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/.gitignore
@@ -0,0 +1,12 @@
+bsondump
+mongo
+mongod
+mongodump
+mongoexport
+mongofiles
+mongoimport
+mongoreplay
+mongorestore
+mongos
+mongostat
+mongotop
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/__init__.py
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/buildlogger.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..f8c8212a890
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/buildlogger.py
@@ -0,0 +1,491 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from buildbot.tac (or BUILDLOGGER_CREDENTIALS)
+# which will be one, two, or three directories up
+# from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+ and returns none.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
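+# A hedged usage sketch (not part of the original script): LogAppender buffers
+# (timestamp, line) pairs and flushes them through its callback once either
+# send_after_lines or send_after_seconds is exceeded; callers drain what's left
+# with submit() when the stream ends.
+#
+#   appender = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+#   for line in iter(proc.stdout.readline, ''):
+#       appender(line)
+#   appender.submit()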
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10 seconds to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = returncode != 0
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+ while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results to the buildlogger webapp in batches
+ (by default 2000 lines or 10 seconds). see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ # We write the pid of the spawned process as the first line of buildlogger.py's stdout because
+ # smoke.py expects to use it to terminate processes individually if already running inside a job
+ # object.
+ sys.stdout.write("[buildlogger.py] pid: %d\n" % (proc.pid))
+ sys.stdout.flush()
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
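+
+# Example invocations (a sketch of how smoke.py drives this script; the builder
+# name, build number, and wrapped commands are illustrative):
+#
+#   MONGO_BUILDER_NAME=linux-64 MONGO_BUILD_NUMBER=42 \
+#       python buildlogger.py ./mongo jstests/core/foo.js     # per-test logs
+#   MONGO_BUILDER_NAME=linux-64 MONGO_BUILD_NUMBER=42 \
+#       python buildlogger.py -g ./mongod --dbpath /data/db   # "global" logs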
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/cleanbb.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile(r"(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
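+
+# Illustrative examples of the rules above (command lines are hypothetical):
+#
+#   shouldKill("mongod --dbpath /data/db/ --port 27017", root="/data/db/") # True
+#   shouldKill("python smoke.py jsCore")                                   # False
+#   shouldKill("/home/buildbot/slave/mongo/mongod --port 27017")           # True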
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception as e:
+ print( "can't get interfaces: " + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError as e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print( "nokill requested, not killing anybody" )
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
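+
+# Example invocations (illustrative):
+#
+#   python cleanbb.py                    # kill stray test processes, scrub /data/db/
+#   python cleanbb.py --nokill /tmp/db   # skip killing, only remove files under /tmp/db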
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmoke.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmoke.py
new file mode 100755
index 00000000000..a6cb03cb620
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmoke.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+
+"""
+Command line utility for executing MongoDB tests of all kinds.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os.path
+import random
+import signal
+import sys
+import time
+import traceback
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ from buildscripts import resmokelib
+
+
+def _execute_suite(suite, logging_config):
+ """
+ Executes each test group of 'suite', failing fast if requested.
+
+ Returns true if the execution of the suite was interrupted by the
+ user, and false otherwise.
+ """
+
+ logger = resmokelib.logging.loggers.EXECUTOR
+
+ for group in suite.test_groups:
+ if resmokelib.config.SHUFFLE:
+ logger.info("Shuffling order of tests for %ss in suite %s. The seed is %d.",
+ group.test_kind, suite.get_name(), resmokelib.config.RANDOM_SEED)
+ random.seed(resmokelib.config.RANDOM_SEED)
+ random.shuffle(group.tests)
+
+ if resmokelib.config.DRY_RUN == "tests":
+ sb = []
+ sb.append("Tests that would be run for %ss in suite %s:"
+ % (group.test_kind, suite.get_name()))
+ if len(group.tests) > 0:
+ for test in group.tests:
+ sb.append(test)
+ else:
+ sb.append("(no tests)")
+ logger.info("\n".join(sb))
+
+ # Set a successful return code on the test group because we want to output the tests
+ # that would get run by any other suites the user specified.
+ group.return_code = 0
+ continue
+
+ if len(group.tests) == 0:
+ logger.info("Skipping %ss, no tests to run", group.test_kind)
+ continue
+
+ group_config = suite.get_executor_config().get(group.test_kind, {})
+ executor = resmokelib.testing.executor.TestGroupExecutor(logger,
+ group,
+ logging_config,
+ **group_config)
+
+ try:
+ executor.run()
+ if resmokelib.config.FAIL_FAST and group.return_code != 0:
+ suite.return_code = group.return_code
+ return False
+ except resmokelib.errors.UserInterrupt:
+ suite.return_code = 130 # Simulate SIGINT as exit code.
+ return True
+ except:
+ logger.exception("Encountered an error when running %ss of suite %s.",
+ group.test_kind, suite.get_name())
+ suite.return_code = 2
+ return False
+
+
+def _log_summary(logger, suites, time_taken):
+ if len(suites) > 1:
+ sb = []
+ sb.append("Summary of all suites: %d suites ran in %0.2f seconds"
+ % (len(suites), time_taken))
+ for suite in suites:
+ suite_sb = []
+ suite.summarize(suite_sb)
+ sb.append(" %s: %s" % (suite.get_name(), "\n ".join(suite_sb)))
+
+ logger.info("=" * 80)
+ logger.info("\n".join(sb))
+
+
+def _summarize_suite(suite):
+ sb = []
+ suite.summarize(sb)
+ return "\n".join(sb)
+
+
+def _dump_suite_config(suite, logging_config):
+ """
+ Returns a string that represents the YAML configuration of a suite.
+
+ TODO: include the "options" key in the result
+ """
+
+ sb = []
+ sb.append("YAML configuration of suite %s" % (suite.get_name()))
+ sb.append(resmokelib.utils.dump_yaml({"selector": suite.get_selector_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"executor": suite.get_executor_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"logging": logging_config}))
+ return "\n".join(sb)
+
+
+def _write_report_file(suites, pathname):
+ """
+ Writes the report.json file if requested.
+ """
+
+ reports = []
+ for suite in suites:
+ for group in suite.test_groups:
+ reports.extend(group.get_reports())
+
+ combined_report_dict = resmokelib.testing.report.TestReport.combine(*reports).as_dict()
+ with open(pathname, "w") as fp:
+ json.dump(combined_report_dict, fp)
+
+
+def main():
+ start_time = time.time()
+
+ values, args = resmokelib.parser.parse_command_line()
+
+ logging_config = resmokelib.parser.get_logging_config(values)
+ resmokelib.logging.config.apply_config(logging_config)
+ resmokelib.logging.flush.start_thread()
+
+ resmokelib.parser.update_config_vars(values)
+
+ exec_logger = resmokelib.logging.loggers.EXECUTOR
+ resmoke_logger = resmokelib.logging.loggers.new_logger("resmoke", parent=exec_logger)
+
+ if values.list_suites:
+ suite_names = resmokelib.parser.get_named_suites()
+ resmoke_logger.info("Suites available to execute:\n%s", "\n".join(suite_names))
+ sys.exit(0)
+
+ interrupted = False
+ suites = resmokelib.parser.get_suites(values, args)
+ try:
+ for suite in suites:
+ resmoke_logger.info(_dump_suite_config(suite, logging_config))
+
+ suite.record_start()
+ interrupted = _execute_suite(suite, logging_config)
+ suite.record_end()
+
+ resmoke_logger.info("=" * 80)
+ resmoke_logger.info("Summary of %s suite: %s",
+ suite.get_name(), _summarize_suite(suite))
+
+ if interrupted or (resmokelib.config.FAIL_FAST and suite.return_code != 0):
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+ sys.exit(suite.return_code)
+
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+
+ # Exit with a nonzero code if any of the suites failed.
+ exit_code = max(suite.return_code for suite in suites)
+ sys.exit(exit_code)
+ finally:
+ if not interrupted:
+ resmokelib.logging.flush.stop_thread()
+
+ if resmokelib.config.REPORT_FILE is not None:
+ _write_report_file(suites, resmokelib.config.REPORT_FILE)
+
+
+if __name__ == "__main__":
+
+ def _dump_stacks(signum, frame):
+ """
+ Signal handler that will dump the stacks of all threads.
+ """
+
+ header_msg = "Dumping stacks due to SIGUSR1 signal"
+
+ sb = []
+ sb.append("=" * len(header_msg))
+ sb.append(header_msg)
+ sb.append("=" * len(header_msg))
+
+ frames = sys._current_frames()
+ sb.append("Total threads: %d" % (len(frames)))
+ sb.append("")
+
+ for thread_id in frames:
+ stack = frames[thread_id]
+ sb.append("Thread %d:" % (thread_id))
+ sb.append("".join(traceback.format_stack(stack)))
+
+ sb.append("=" * len(header_msg))
+ print "\n".join(sb)
+
+ try:
+ signal.signal(signal.SIGUSR1, _dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ main()
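+
+# With the handler above registered, a hung run can be inspected from another
+# shell on POSIX systems (the AttributeError fallback covers platforms such as
+# Windows that lack SIGUSR1), e.g.:
+#
+#   kill -s SIGUSR1 <resmoke_pid>   # dumps every thread's stack to stdout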
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/__init__.py
new file mode 100644
index 00000000000..37f5a889956
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+
+from .suites import NAMED_SUITES
+from .loggers import NAMED_LOGGERS
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
new file mode 100644
index 00000000000..6511d496364
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for logger configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_loggers():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_loggers = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_loggers[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_loggers
+
+NAMED_LOGGERS = _get_named_loggers()
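+
+# With the logger configurations added in this patch, the mapping contains the
+# four entries below (values come from os.path.relpath, so the exact paths
+# depend on the directory resmoke.py is invoked from):
+#
+#   NAMED_LOGGERS == {"buildlogger": ".../loggers/buildlogger.yml",
+#                     "console": ".../loggers/console.yml",
+#                     "file": ".../loggers/file.yml",
+#                     "suppress": ".../loggers/suppress.yml"}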
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
new file mode 100644
index 00000000000..302d2677491
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: buildlogger
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: buildlogger
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
new file mode 100644
index 00000000000..b233de409b3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
new file mode 100644
index 00000000000..3d2d15cd5bc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
@@ -0,0 +1,19 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: executor.log
+ mode: w
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: fixture.log
+ mode: w
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: tests.log
+ mode: w
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
new file mode 100644
index 00000000000..c69bb793b0b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
@@ -0,0 +1,10 @@
+logging:
+ executor:
+ handlers:
+ - class: logging.NullHandler
+ fixture:
+ handlers:
+ - class: logging.NullHandler
+ tests:
+ handlers:
+ - class: logging.NullHandler
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
new file mode 100644
index 00000000000..e075dd22e0d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for suite configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_suites():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_suites = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_suites[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_suites
+
+NAMED_SUITES = _get_named_suites()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
new file mode 100644
index 00000000000..bc094c1f549
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
@@ -0,0 +1,27 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
new file mode 100644
index 00000000000..2a9330e2856
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
@@ -0,0 +1,38 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ useSSL: true
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ ssl: ''
+ sslAllowInvalidCertificates: ''
+ sslCAFile: jstests/libs/ca.pem
+ sslPEMKeyFile: jstests/libs/client.pem
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ sslMode: allowSSL
+ sslPEMKeyFile: jstests/libs/server.pem
+ sslCAFile: jstests/libs/ca.pem
+ sslWeakCertificateValidation: ''
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/native_cert_ssl.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/native_cert_ssl.yml
new file mode 100644
index 00000000000..175d6ac9941
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/native_cert_ssl.yml
@@ -0,0 +1,14 @@
+selector:
+ js_test:
+ roots:
+ - jstests/ssl/*.js
+
+# ssl tests start their own mongod's.
+executor:
+ js_test:
+ config:
+ shell_options:
+ nodb: ''
+ ssl: ''
+ sslAllowInvalidCertificates: ''
+ readMode: commands
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
new file mode 100644
index 00000000000..8c51a3b2f46
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
@@ -0,0 +1,23 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/archive_targets.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
new file mode 100644
index 00000000000..768b88ca6dd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
@@ -0,0 +1,21 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/gzip_targets.js');"
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/__init__.py
new file mode 100644
index 00000000000..06b0539e25b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+
+from . import errors
+from . import logging
+from . import parser
+from . import testing
+from . import utils
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/config.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/config.py
new file mode 100644
index 00000000000..ecb7fec7fa3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/config.py
@@ -0,0 +1,165 @@
+"""
+Configuration options for resmoke.py.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import time
+
+
+##
+# Default values.
+##
+
+# Default path for where to look for executables.
+DEFAULT_DBTEST_EXECUTABLE = os.path.join(os.curdir, "dbtest")
+DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
+DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
+DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")
+
+# Default root directory for where resmoke.py puts directories containing data files of mongod's it
+# starts, as well as those started by individual tests.
+DEFAULT_DBPATH_PREFIX = os.path.normpath("/data/db")
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by resmoke.py.
+FIXTURE_SUBDIR = "resmoke"
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by individual tests.
+MONGO_RUNNER_SUBDIR = "mongorunner"
+
+# Names below correspond to how they are specified via the command line or in the options YAML file.
+DEFAULTS = {
+ "basePort": 20000,
+ "buildloggerUrl": "https://logkeeper.mongodb.org",
+ "continueOnFailure": False,
+ "dbpathPrefix": None,
+ "dbtest": None,
+ "dryRun": None,
+ "excludeWithAllTags": None,
+ "excludeWithAnyTags": None,
+ "includeWithAllTags": None,
+ "includeWithAnyTags": None,
+ "jobs": 1,
+ "mongo": None,
+ "mongod": None,
+ "mongodSetParameters": None,
+ "mongos": None,
+ "mongosSetParameters": None,
+ "nojournal": False,
+ "repeat": 1,
+ "reportFile": None,
+ "seed": long(time.time() * 256), # Taken from random.py code in Python 2.7.
+ "shellReadMode": None,
+ "shellWriteMode": None,
+ "shuffle": False,
+ "storageEngine": None,
+ "wiredTigerCollectionConfigString": None,
+ "wiredTigerEngineConfigString": None,
+ "wiredTigerIndexConfigString": None
+}
+
+
+##
+# Variables that are set by the user at the command line or with --options.
+##
+
+# The starting port number to use for mongod and mongos processes spawned by resmoke.py and the
+# mongo shell.
+BASE_PORT = None
+
+# The root url of the buildlogger server.
+BUILDLOGGER_URL = None
+
+# Root directory for where resmoke.py puts directories containing data files of mongod's it starts,
+# as well as those started by individual tests.
+DBPATH_PREFIX = None
+
+# The path to the dbtest executable used by resmoke.py.
+DBTEST_EXECUTABLE = None
+
+# If set to "tests", then resmoke.py will output the tests that would be run by each suite (without
+# actually running them).
+DRY_RUN = None
+
+# If set, then any jstests that have all of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ALL_TAGS = None
+
+# If set, then any jstests that have any of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ANY_TAGS = None
+
+# If true, then a test failure or error will cause resmoke.py to exit and not run any more tests.
+FAIL_FAST = None
+
+# If set, then only jstests that have all of the specified tags will be run during the jstest
+# portion of the suite(s).
+INCLUDE_WITH_ALL_TAGS = None
+
+# If set, then only jstests that have at least one of the specified tags will be run during the
+# jstest portion of the suite(s).
+INCLUDE_WITH_ANY_TAGS = None
+
+# If set, then resmoke.py starts the specified number of Job instances to run tests.
+JOBS = None
+
+# The path to the mongo executable used by resmoke.py.
+MONGO_EXECUTABLE = None
+
+# The path to the mongod executable used by resmoke.py.
+MONGOD_EXECUTABLE = None
+
+# The --setParameter options passed to mongod.
+MONGOD_SET_PARAMETERS = None
+
+# The path to the mongos executable used by resmoke.py.
+MONGOS_EXECUTABLE = None
+
+# The --setParameter options passed to mongos.
+MONGOS_SET_PARAMETERS = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not have journaling
+# enabled.
+NO_JOURNAL = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not preallocate
+# journal files.
+NO_PREALLOC_JOURNAL = None
+
+# If set, then the RNG is seeded with the specified value. Otherwise uses a seed based on the time
+# this module was loaded.
+RANDOM_SEED = None
+
+# If set, then each suite is repeated the specified number of times.
+REPEAT = None
+
+# If set, then resmoke.py will write out a report file with the status of each test that ran.
+REPORT_FILE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified read mode.
+SHELL_READ_MODE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified write mode.
+SHELL_WRITE_MODE = None
+
+# If true, then the order the tests run in is randomized. Otherwise the tests will run in
+# alphabetical (case-insensitive) order.
+SHUFFLE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# storage engine.
+STORAGE_ENGINE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger collection configuration settings.
+WT_COLL_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger storage engine configuration settings.
+WT_ENGINE_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger index configuration settings.
+WT_INDEX_CONFIG = None
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/__init__.py
new file mode 100644
index 00000000000..29a19a52500
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import process
+from . import programs
+from . import network
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/network.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/network.py
new file mode 100644
index 00000000000..44e54667a67
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/network.py
@@ -0,0 +1,114 @@
+"""
+Class used to allocate ports for use by various mongod and mongos
+processes involved in running the tests.
+"""
+
+from __future__ import absolute_import
+
+import collections
+import functools
+import threading
+
+from .. import config
+from .. import errors
+
+
+def _check_port(func):
+ """
+ A decorator that verifies the port returned by the wrapped function
+ is in the valid range.
+
+ Returns the port if it is valid, and raises a PortAllocationError
+ otherwise.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ port = func(*args, **kwargs)
+
+ if port < 0:
+ raise errors.PortAllocationError("Attempted to use a negative port")
+
+ if port > PortAllocator.MAX_PORT:
+ raise errors.PortAllocationError("Exhausted all available ports. Consider decreasing"
+ " the number of jobs, or using a lower base port")
+
+ return port
+
+ return wrapper
+
+
+class PortAllocator(object):
+ """
+ This class is responsible for allocating ranges of ports.
+
+ It reserves a range of ports for each job with the first part of
+ that range used for the fixture started by that job, and the second
+ part of the range used for mongod and mongos processes started by
+ tests run by that job.
+ """
+
+ # A PortAllocator will not return any port greater than this number.
+ MAX_PORT = 2 ** 16 - 1
+
+ # Each job gets a contiguous range of _PORTS_PER_JOB ports, with job 0 getting the first block
+ # of ports, job 1 getting the second block, and so on.
+ _PORTS_PER_JOB = 250
+
+ # The first _PORTS_PER_FIXTURE ports of each range are reserved for the fixtures, the remainder
+ # of the port range is used by tests.
+ _PORTS_PER_FIXTURE = 10
+
+ _NUM_USED_PORTS_LOCK = threading.Lock()
+
+ # Used to keep track of how many ports a fixture has allocated.
+ _NUM_USED_PORTS = collections.defaultdict(int)
+
+ @classmethod
+ @_check_port
+ def next_fixture_port(cls, job_num):
+ """
+ Returns the next port for a fixture to use.
+
+ Raises a PortAllocationError if the fixture has requested more
+ ports than are reserved per fixture, or if the next port is not a
+ valid port number.
+ """
+ with cls._NUM_USED_PORTS_LOCK:
+ start_port = config.BASE_PORT + (job_num * cls._PORTS_PER_JOB)
+ num_used_ports = cls._NUM_USED_PORTS[job_num]
+ next_port = start_port + num_used_ports
+
+ cls._NUM_USED_PORTS[job_num] += 1
+
+ if next_port >= start_port + cls._PORTS_PER_FIXTURE:
+ raise errors.PortAllocationError(
+ "Fixture has requested more than the %d ports reserved per fixture"
+ % cls._PORTS_PER_FIXTURE)
+
+ return next_port
+
+ @classmethod
+ @_check_port
+ def min_test_port(cls, job_num):
+ """
+ For the given job, returns the lowest port that is reserved for
+ use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ return config.BASE_PORT + (job_num * cls._PORTS_PER_JOB) + cls._PORTS_PER_FIXTURE
+
+ @classmethod
+ @_check_port
+ def max_test_port(cls, job_num):
+ """
+ For the given job, returns the highest port that is reserved
+ for use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ next_range_start = config.BASE_PORT + ((job_num + 1) * cls._PORTS_PER_JOB)
+ return next_range_start - 1
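+
+# A worked example of the arithmetic above, assuming the default base port of
+# 20000 (config.DEFAULTS["basePort"]):
+#
+#   job 0: fixture ports 20000-20009, test ports 20010-20249
+#   job 1: fixture ports 20250-20259, test ports 20260-20499
+#
+# so next_fixture_port(1) starts at 20250, min_test_port(1) == 20260, and
+# max_test_port(1) == 20499.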
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/pipe.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/pipe.py
new file mode 100644
index 00000000000..bb080721b2d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/pipe.py
@@ -0,0 +1,87 @@
+"""
+Helper class to read output of a subprocess. Used to avoid deadlocks
+from the pipe buffer filling up and blocking the subprocess while it's
+being waited on.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class LoggerPipe(threading.Thread):
+ """
+ Asynchronously reads the output of a subprocess and sends it to a
+ logger.
+ """
+
+ # The start() and join() methods are not intended to be called directly on the LoggerPipe
+ # instance. Since we override them to enforce that, the superclass's versions are preserved here.
+ __start = threading.Thread.start
+ __join = threading.Thread.join
+
+ def __init__(self, logger, level, pipe_out):
+ """
+ Initializes the LoggerPipe with the specified logger, logging
+ level to use, and pipe to read from.
+ """
+
+ threading.Thread.__init__(self)
+ # Main thread should not call join() when exiting
+ self.daemon = True
+
+ self.__logger = logger
+ self.__level = level
+ self.__pipe_out = pipe_out
+
+ self.__lock = threading.Lock()
+ self.__condition = threading.Condition(self.__lock)
+
+ self.__started = False
+ self.__finished = False
+
+ LoggerPipe.__start(self)
+
+ def start(self):
+ raise NotImplementedError("start should not be called directly")
+
+ def run(self):
+ """
+ Reads the output from 'pipe_out' and logs each line to 'logger'.
+ """
+
+ with self.__lock:
+ self.__started = True
+ self.__condition.notify_all()
+
+ # Close the pipe when finished reading all of the output.
+ with self.__pipe_out:
+ # Avoid buffering the output from the pipe.
+ for line in iter(self.__pipe_out.readline, b""):
+ # Convert the output of the process from a bytestring to a UTF-8 string, and replace
+ # any characters that cannot be decoded with the official Unicode replacement
+ # character, U+FFFD. The log messages of MongoDB processes are not always valid
+ # UTF-8 sequences. See SERVER-7506.
+ line = line.decode("utf-8", "replace")
+ self.__logger.log(self.__level, line.rstrip())
+
+ with self.__lock:
+ self.__finished = True
+ self.__condition.notify_all()
+
+ def join(self, timeout=None):
+ raise NotImplementedError("join should not be called directly")
+
+ def wait_until_started(self):
+ with self.__lock:
+ while not self.__started:
+ self.__condition.wait()
+
+ def wait_until_finished(self):
+ with self.__lock:
+ while not self.__finished:
+ self.__condition.wait()
+
+ # No need to pass a timeout to join() because the thread should already be done after
+ # notifying us it has finished reading output from the pipe.
+ LoggerPipe.__join(self) # Tidy up the started thread.
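+
+# A minimal usage sketch, mirroring how core.process.Process wires this up:
+#
+#   proc = subprocess.Popen(args, bufsize=0, stdout=subprocess.PIPE)
+#   stdout_pipe = LoggerPipe(logger, logging.INFO, proc.stdout)
+#   stdout_pipe.wait_until_started()   # the reader thread is consuming output
+#   proc.wait()
+#   stdout_pipe.wait_until_finished()  # every line has been logged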
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/process.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/process.py
new file mode 100644
index 00000000000..f54b0f0a640
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/process.py
@@ -0,0 +1,234 @@
+"""
+A more reliable way to create and destroy processes.
+
+Uses job objects when running on Windows to ensure that all created
+processes are terminated.
+"""
+
+from __future__ import absolute_import
+
+import atexit
+import logging
+import os
+import os.path
+import sys
+import threading
+
+# The subprocess32 module resolves the thread-safety issues of the subprocess module in Python 2.x
+# when the _posixsubprocess C extension module is also available. Additionally, the _posixsubprocess
+# C extension module avoids triggering invalid free() calls on Python's internal data structure for
+# thread-local storage by skipping the PyOS_AfterFork() call when the 'preexec_fn' parameter isn't
+# specified to subprocess.Popen(). See SERVER-22219 for more details.
+#
+# The subprocess32 module is untested on Windows and thus isn't recommended for use, even when it's
+# installed. See https://github.com/google/python-subprocess32/blob/3.2.7/README.md#usage.
+if os.name == "posix" and sys.version_info[0] == 2:
+ try:
+ import subprocess32 as subprocess
+ except ImportError:
+ import warnings
+ warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
+ " available. When using the subprocess module, a child process may trigger"
+ " an invalid free(). See SERVER-22219 for more details."),
+ RuntimeWarning)
+ import subprocess
+else:
+ import subprocess
+
+from . import pipe
+from .. import utils
+
+# Attempt to avoid race conditions (e.g. hangs caused by a file descriptor being left open) when
+# starting subprocesses concurrently from multiple threads by guarding calls to subprocess.Popen()
+# with a lock. See https://bugs.python.org/issue2320 and https://bugs.python.org/issue12739 as
+# reports of such hangs.
+#
+# This lock probably isn't necessary when both the subprocess32 module and its _posixsubprocess C
+# extension module are available because either
+# (a) the pipe2() syscall is available on the platform we're using, so pipes are atomically
+# created with the FD_CLOEXEC flag set on them, or
+# (b) the pipe2() syscall isn't available, but the GIL isn't released during the
+# _posixsubprocess.fork_exec() call or the _posixsubprocess.cloexec_pipe() call.
+# See https://bugs.python.org/issue7213 for more details.
+_POPEN_LOCK = threading.Lock()
+
+# Job objects are the only reliable way to ensure that processes are terminated on Windows.
+if sys.platform == "win32":
+ import win32api
+ import win32con
+ import win32job
+ import win32process
+ import winerror
+
+ def _init_job_object():
+ job_object = win32job.CreateJobObject(None, "")
+
+ # Get the limit and job state information of the newly-created job object.
+ job_info = win32job.QueryInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation)
+
+ # Set up the job object so that closing the last handle to the job object
+ # will terminate all associated processes and destroy the job object itself.
+ job_info["BasicLimitInformation"]["LimitFlags"] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+
+ # Update the limits of the job object.
+ win32job.SetInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ return job_object
+
+ # Don't create a job object if the current process is already inside one.
+ if win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ _JOB_OBJECT = None
+ else:
+ _JOB_OBJECT = _init_job_object()
+ atexit.register(win32api.CloseHandle, _JOB_OBJECT)
+
+
+class Process(object):
+ """
+ Wrapper around subprocess.Popen class.
+ """
+
+ def __init__(self, logger, args, env=None, env_vars=None):
+ """
+ Initializes the process with the specified logger, arguments,
+ and environment.
+ """
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(args[0])[1] != ".exe":
+ args[0] += ".exe"
+
+ self.logger = logger
+ self.args = args
+ self.env = utils.default_if_none(env, os.environ.copy())
+ if env_vars is not None:
+ self.env.update(env_vars)
+
+ self.pid = None
+
+ self._process = None
+ self._stdout_pipe = None
+ self._stderr_pipe = None
+
+ def start(self):
+ """
+ Starts the process and the logger pipes for its stdout and
+ stderr.
+ """
+
+ creation_flags = 0
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ # Use unbuffered I/O pipes to avoid adding delay between when the subprocess writes output
+ # and when the LoggerPipe thread reads it.
+ buffer_size = 0
+
+ # Close file descriptors in the child process before executing the program. This prevents
+ # file descriptors that were inherited due to multiple calls to fork() -- either within one
+ # thread, or concurrently from multiple threads -- from causing another subprocess to wait
+ # for the completion of the newly spawned child process. Closing other file descriptors
+ # isn't supported on Windows when stdout and stderr are redirected.
+ close_fds = (sys.platform != "win32")
+
+ with _POPEN_LOCK:
+ self._process = subprocess.Popen(self.args,
+ bufsize=buffer_size,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=close_fds,
+ env=self.env,
+ creationflags=creation_flags)
+ self.pid = self._process.pid
+
+ self._stdout_pipe = pipe.LoggerPipe(self.logger, logging.INFO, self._process.stdout)
+ self._stderr_pipe = pipe.LoggerPipe(self.logger, logging.ERROR, self._process.stderr)
+
+ self._stdout_pipe.wait_until_started()
+ self._stderr_pipe.wait_until_started()
+
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ try:
+ win32job.AssignProcessToJobObject(_JOB_OBJECT, self._process._handle)
+ except win32job.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ def stop(self):
+ """
+ Terminates the process.
+ """
+
+ if sys.platform == "win32":
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python 2.7
+ # because earlier versions do not catch exceptions.
+ try:
+ # Have the process exit with code 0 if it is terminated by us to simplify the
+ # success-checking logic later on.
+ win32process.TerminateProcess(self._process._handle, 0)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process
+ # has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+ else:
+ try:
+ self._process.terminate()
+ except OSError as err:
+ # ESRCH (errno=3) is received when the process has already died.
+ if err.errno != 3:
+ raise
+
+ def poll(self):
+ return self._process.poll()
+
+ def wait(self):
+ """
+ Waits until the process has terminated and all output has been
+ consumed by the logger pipes.
+ """
+
+ return_code = self._process.wait()
+
+ if self._stdout_pipe:
+ self._stdout_pipe.wait_until_finished()
+ if self._stderr_pipe:
+ self._stderr_pipe.wait_until_finished()
+
+ return return_code
+
+ def as_command(self):
+ """
+ Returns an equivalent command line invocation of the process.
+ """
+
+ default_env = os.environ
+ env_diff = self.env.copy()
+
+ # Remove environment variables that appear in both 'os.environ' and 'self.env'.
+ for env_var in default_env:
+ if env_var in env_diff and env_diff[env_var] == default_env[env_var]:
+ del env_diff[env_var]
+
+ sb = [] # String builder.
+ for env_var in env_diff:
+ sb.append("%s=%s" % (env_var, env_diff[env_var]))
+ sb.extend(self.args)
+
+ return " ".join(sb)
+
+ def __str__(self):
+ if self.pid is None:
+ return self.as_command()
+ return "%s (%d)" % (self.as_command(), self.pid)
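+
+# A minimal usage sketch (the logger and command line are illustrative):
+#
+#   proc = Process(logger, ["./mongod", "--dbpath", "/data/db", "--port", "20000"])
+#   proc.start()                  # spawns the process and both logger pipes
+#   return_code = proc.wait()     # blocks until exit and all output is logged
+#   print(proc.as_command())      # reproducible command line for debugging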
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/programs.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/programs.py
new file mode 100644
index 00000000000..cdffcdf7bca
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/core/programs.py
@@ -0,0 +1,311 @@
+"""
+Utility functions to create MongoDB processes.
+
+Handles all the nitty-gritty parameter conversion.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os
+import os.path
+import stat
+
+from . import process as _process
+from .. import utils
+from .. import config
+
+
+def mongod_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongod executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOD_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOD_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOD_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ shortcut_opts = {
+ "nojournal": config.NO_JOURNAL,
+ "nopreallocj": config.NO_PREALLOC_JOURNAL,
+ "storageEngine": config.STORAGE_ENGINE,
+ "wiredTigerCollectionConfigString": config.WT_COLL_CONFIG,
+ "wiredTigerEngineConfigString": config.WT_ENGINE_CONFIG,
+ "wiredTigerIndexConfigString": config.WT_INDEX_CONFIG,
+ }
+
+ # These options are just flags, so they should not take a value.
+ opts_without_vals = ("nojournal", "nopreallocj")
+
+ # Have the --nojournal command line argument to resmoke.py unset the journal option.
+ if shortcut_opts["nojournal"] and "journal" in kwargs:
+ del kwargs["journal"]
+
+ # Ensure that config servers run with journaling enabled.
+ if "configsvr" in kwargs:
+ shortcut_opts["nojournal"] = False
+ kwargs["journal"] = ""
+
+ # Command line options override the YAML configuration.
+ for opt_name in shortcut_opts:
+ opt_value = shortcut_opts[opt_name]
+ if opt_name in opts_without_vals:
+ # Options that are specified as --flag on the command line are represented by a boolean
+ # value where True indicates that the flag should be included in 'kwargs'.
+ if opt_value:
+ kwargs[opt_name] = ""
+ else:
+ # Options that are specified as --key=value on the command line are represented by a
+ # value where None indicates that the key-value pair shouldn't be included in 'kwargs'.
+ if opt_value is not None:
+ kwargs[opt_name] = opt_value
+
+ # Override the storage engine specified on the command line with "wiredTiger" if running a
+ # config server replica set.
+ if "replSet" in kwargs and "configsvr" in kwargs:
+ kwargs["storageEngine"] = "wiredTiger"
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongos executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOS_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOS_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongo shell with arguments
+ constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
+ args = [executable]
+
+ eval_sb = [] # String builder.
+ global_vars = kwargs.pop("global_vars", {}).copy()
+
+ shortcut_opts = {
+ "noJournal": (config.NO_JOURNAL, False),
+ "noJournalPrealloc": (config.NO_PREALLOC_JOURNAL, False),
+ "storageEngine": (config.STORAGE_ENGINE, ""),
+ "testName": (os.path.splitext(os.path.basename(filename))[0], ""),
+ "wiredTigerCollectionConfigString": (config.WT_COLL_CONFIG, ""),
+ "wiredTigerEngineConfigString": (config.WT_ENGINE_CONFIG, ""),
+ "wiredTigerIndexConfigString": (config.WT_INDEX_CONFIG, ""),
+ }
+
+ test_data = global_vars.get("TestData", {}).copy()
+ for opt_name in shortcut_opts:
+ (opt_value, opt_default) = shortcut_opts[opt_name]
+ if opt_value is not None:
+ test_data[opt_name] = opt_value
+ elif opt_name not in test_data:
+ # Only use 'opt_default' if the property wasn't set in the YAML configuration.
+ test_data[opt_name] = opt_default
+ global_vars["TestData"] = test_data
+
+ # Pass setParameters for mongos and mongod through TestData. The setParameter parsing in
+ # servers.js is very primitive (just splits on commas), so this may break for non-scalar
+ # setParameter values.
+ if config.MONGOD_SET_PARAMETERS is not None:
+ if "setParameters" in test_data:
+ raise ValueError("setParameters passed via TestData can only be set from either the"
+ " command line or the suite YAML, not both")
+ mongod_set_parameters = utils.load_yaml(config.MONGOD_SET_PARAMETERS)
+ test_data["setParameters"] = _format_test_data_set_parameters(mongod_set_parameters)
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ if "setParametersMongos" in test_data:
+ raise ValueError("setParametersMongos passed via TestData can only be set from either"
+ " the command line or the suite YAML, not both")
+ mongos_set_parameters = utils.load_yaml(config.MONGOS_SET_PARAMETERS)
+ test_data["setParametersMongos"] = _format_test_data_set_parameters(mongos_set_parameters)
+
+ if "eval_prepend" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval_prepend")))
+
+ for var_name in global_vars:
+ _format_shell_vars(eval_sb, var_name, global_vars[var_name])
+
+ if "eval" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval")))
+
+ eval_str = "; ".join(eval_sb)
+ args.append("--eval")
+ args.append(eval_str)
+
+ if config.SHELL_READ_MODE is not None:
+ kwargs["readMode"] = config.SHELL_READ_MODE
+
+ if config.SHELL_WRITE_MODE is not None:
+ kwargs["writeMode"] = config.SHELL_WRITE_MODE
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ # Have the mongo shell run the specified file.
+ args.append(filename)
+
+ _set_keyfile_permissions(test_data)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_shell_vars(sb, path, value):
+ """
+ Formats 'value' in a way that can be passed to --eval.
+
+ If 'value' is a dictionary, then it is unrolled into the creation of
+ a new JSON object with properties assigned for each key of the
+ dictionary.
+ """
+
+ # Only need to do special handling for JSON objects.
+ if not isinstance(value, dict):
+ sb.append("%s = %s" % (path, json.dumps(value)))
+ return
+
+ # Avoid including curly braces and colons in output so that the command invocation can be
+ # copied and run through bash.
+ sb.append("%s = new Object()" % (path))
+ for subkey in value:
+ _format_shell_vars(sb, ".".join((path, subkey)), value[subkey])
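+
+# For example, _format_shell_vars(sb, "TestData", {"noJournal": True}) appends
+# the two --eval fragments:
+#
+#   TestData = new Object()
+#   TestData.noJournal = true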
+
+
+def dbtest_program(logger, executable=None, suites=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a dbtest executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_DBTEST_EXECUTABLE)
+ args = [executable]
+
+ if suites is not None:
+ args.extend(suites)
+
+ if config.STORAGE_ENGINE is not None:
+ kwargs["storageEngine"] = config.STORAGE_ENGINE
+
+ return generic_program(logger, args, process_kwargs=process_kwargs, **kwargs)
+
+def generic_program(logger, args, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts an arbitrary executable with
+ arguments constructed from 'kwargs'. The args parameter is an array
+ of strings containing the command to execute.
+ """
+
+ if not utils.is_string_list(args):
+ raise ValueError("The args parameter must be a list of command arguments")
+
+ _apply_kwargs(args, kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_test_data_set_parameters(set_parameters):
+ """
+ Converts key-value pairs from 'set_parameters' into the comma
+ delimited list format expected by the parser in servers.js.
+
+ WARNING: the parsing logic in servers.js is very primitive.
+ Non-scalar options such as logComponentVerbosity will not work
+ correctly.
+ """
+ params = []
+ for param_name in set_parameters:
+ param_value = set_parameters[param_name]
+ if isinstance(param_value, bool):
+ # Boolean valued setParameters are specified as lowercase strings.
+ param_value = "true" if param_value else "false"
+ elif isinstance(param_value, dict):
+ raise TypeError("Non-scalar setParameter values are not currently supported.")
+ params.append("%s=%s" % (param_name, param_value))
+ return ",".join(params)
+
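+# A doctest-style sketch of the conversion above (illustrative only):
+#   >>> _format_test_data_set_parameters({"enableTestCommands": True})
+#   'enableTestCommands=true'
+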
+def _apply_set_parameters(args, set_parameter):
+ """
+ Converts key-value pairs from 'set_parameter' into --setParameter key=value
+ arguments to an executable and appends them to 'args'.
+ """
+
+ for param_name in set_parameter:
+ param_value = set_parameter[param_name]
+ # --setParameter takes boolean values as lowercase strings.
+ if isinstance(param_value, bool):
+ param_value = "true" if param_value else "false"
+ args.append("--setParameter")
+ args.append("%s=%s" % (param_name, param_value))
+
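+# A doctest-style sketch of the expansion above (illustrative only):
+#   >>> args = ["mongod"]
+#   >>> _apply_set_parameters(args, {"enableTestCommands": True})
+#   >>> args
+#   ['mongod', '--setParameter', 'enableTestCommands=true']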
+
+def _apply_kwargs(args, kwargs):
+ """
+ Converts key-value pairs from 'kwargs' into --key value arguments
+ to an executable and appends them to 'args'.
+
+ A --flag without a value is represented with the empty string.
+ """
+
+ for arg_name in kwargs:
+ arg_value = str(kwargs[arg_name])
+ args.append("--%s" % (arg_name))
+ if arg_value:
+ args.append(arg_value)
+
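+# A doctest-style sketch of the expansion above (illustrative only); note how
+# an empty string value yields a bare --flag:
+#   >>> args = []
+#   >>> _apply_kwargs(args, {"port": 20000})
+#   >>> _apply_kwargs(args, {"quiet": ""})
+#   >>> args
+#   ['--port', '20000', '--quiet']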
+
+def _set_keyfile_permissions(opts):
+ """
+ Change the permissions of keyfiles in 'opts' to 600, i.e. only the
+ user can read and write the file.
+
+ This is necessary to avoid having the mongod/mongos fail to start up
+ because "permissions on the keyfiles are too open".
+
+ We can't permanently set the keyfile permissions because git is not
+ aware of them.
+ """
+ if "keyFile" in opts:
+ os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR)
+ if "encryptionKeyFile" in opts:
+ os.chmod(opts["encryptionKeyFile"], stat.S_IRUSR | stat.S_IWUSR)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/errors.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/errors.py
new file mode 100644
index 00000000000..6d2a704e390
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/errors.py
@@ -0,0 +1,52 @@
+"""
+Exceptions raised by resmoke.py.
+"""
+
+
+class ResmokeError(Exception):
+ """
+ Base class for all resmoke.py exceptions.
+ """
+ pass
+
+
+class StopExecution(ResmokeError):
+ """
+ Exception that is raised when resmoke.py should stop executing tests
+ if failing fast is enabled.
+ """
+ pass
+
+
+class UserInterrupt(StopExecution):
+ """
+ Exception that is raised when a user signals resmoke.py to
+ unconditionally stop executing tests.
+ """
+ pass
+
+
+class TestFailure(ResmokeError):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ determines that the previous test should be marked as a failure.
+ """
+ pass
+
+
+class ServerFailure(TestFailure):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ detects that the fixture did not exit cleanly and should be marked
+ as a failure.
+ """
+ pass
+
+
+class PortAllocationError(ResmokeError):
+ """
+ Exception that is raised by the PortAllocator if a port is requested
+ outside of the range of valid ports, or if a fixture requests more
+ ports than were reserved for that job.
+ """
+ pass
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
new file mode 100644
index 00000000000..54609ad861f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
@@ -0,0 +1,14 @@
+"""
+Extension to the logging package to support buildlogger.
+"""
+
+from __future__ import absolute_import
+
+# Alias the built-in logging.Logger class for type checking arguments. Those interested in
+# constructing a new Logger instance should use the loggers.new_logger() function instead.
+from logging import Logger
+
+from . import config
+from . import buildlogger
+from . import flush
+from . import loggers
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
new file mode 100644
index 00000000000..c5f5d40401b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
@@ -0,0 +1,284 @@
+"""
+Defines handlers for communicating with a buildlogger server.
+"""
+
+from __future__ import absolute_import
+
+import functools
+import urllib2
+
+from . import handlers
+from . import loggers
+from .. import config as _config
+
+
+CREATE_BUILD_ENDPOINT = "/build"
+APPEND_GLOBAL_LOGS_ENDPOINT = "/build/%(build_id)s"
+CREATE_TEST_ENDPOINT = "/build/%(build_id)s/test"
+APPEND_TEST_LOGS_ENDPOINT = "/build/%(build_id)s/test/%(test_id)s"
+
+_BUILDLOGGER_REALM = "buildlogs"
+_BUILDLOGGER_CONFIG = "mci.buildlogger"
+
+_SEND_AFTER_LINES = 2000
+_SEND_AFTER_SECS = 10
+
+
+def _log_on_error(func):
+ """
+ A decorator that causes any exceptions to be logged by the
+ "buildlogger" Logger instance.
+
+ Returns the wrapped function's return value, or None if an error
+ was encountered.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError as err:
+ sb = [] # String builder.
+ sb.append("HTTP Error %s: %s" % (err.code, err.msg))
+ sb.append("POST %s" % (err.filename))
+
+ for name in err.hdrs:
+ value = err.hdrs[name]
+ sb.append(" %s: %s" % (name, value))
+
+ # Try to read the response back from the server.
+ if hasattr(err, "read"):
+ sb.append(err.read())
+
+ loggers._BUILDLOGGER_FALLBACK.exception("\n".join(sb))
+ except:
+ loggers._BUILDLOGGER_FALLBACK.exception("Encountered an error.")
+ return None
+
+ return wrapper
+
+@_log_on_error
+def get_config():
+ """
+ Returns the buildlogger configuration as evaluated from the
+ _BUILDLOGGER_CONFIG file.
+ """
+
+ tmp_globals = {} # Avoid conflicts with variables defined in the config file.
+ config = {}
+ execfile(_BUILDLOGGER_CONFIG, tmp_globals, config)
+
+ # Rename "slavename" to "username" if present.
+ if "slavename" in config and "username" not in config:
+ config["username"] = config["slavename"]
+ del config["slavename"]
+ # Rename "passwd" to "password" if present.
+ if "passwd" in config and "password" not in config:
+ config["password"] = config["passwd"]
+ del config["passwd"]
+
+ return config
+
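+# The _BUILDLOGGER_CONFIG file is plain Python evaluated with execfile(). Based
+# on the keys consumed above and in new_build_id(), its contents are assumed to
+# look something like (values are placeholders):
+#   slavename = "builder-host-1"
+#   passwd = "secret"
+#   builder = "mongo-tools_linux_64"
+#   build_num = 42
+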
+@_log_on_error
+def new_build_id(config):
+ """
+ Returns a new build id for sending global logs to.
+ """
+
+ if config is None:
+ return None
+
+ username = config["username"]
+ password = config["password"]
+ builder = config["builder"]
+ build_num = int(config["build_num"])
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=username,
+ password=password)
+
+ response = handler.post(CREATE_BUILD_ENDPOINT, data={
+ "builder": builder,
+ "buildnum": build_num,
+ })
+
+ return response["id"]
+
+@_log_on_error
+def new_test_id(build_id, build_config, test_filename, test_command):
+ """
+ Returns a new test id for sending test logs to.
+ """
+
+ if build_id is None or build_config is None:
+ return None
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=build_config["username"],
+ password=build_config["password"])
+
+ endpoint = CREATE_TEST_ENDPOINT % {"build_id": build_id}
+ response = handler.post(endpoint, data={
+ "test_filename": test_filename,
+ "command": test_command,
+ "phase": build_config.get("build_phase", "unknown"),
+ })
+
+ return response["id"]
+
+
+class _BaseBuildloggerHandler(handlers.BufferedHandler):
+ """
+ Base class of the buildlogger handler for the global logs and the
+ handler for the test logs.
+ """
+
+ def __init__(self,
+ build_id,
+ build_config,
+ capacity=_SEND_AFTER_LINES,
+ interval_secs=_SEND_AFTER_SECS):
+ """
+ Initializes the buildlogger handler with the build id and
+ credentials.
+ """
+
+ handlers.BufferedHandler.__init__(self, capacity, interval_secs)
+
+ username = build_config["username"]
+ password = build_config["password"]
+
+ self.http_handler = handlers.HTTPHandler(_BUILDLOGGER_REALM,
+ _config.BUILDLOGGER_URL,
+ username,
+ password)
+
+ self.build_id = build_id
+ self.retry_buffer = []
+
+ def process_record(self, record):
+ """
+ Returns a tuple of the time the log record was created and the
+ message, because the buildlogger server expects the log messages
+ to be formatted in JSON as:
+
+ [ [ <log-time-1>, <log-message-1> ],
+ [ <log-time-2>, <log-message-2> ],
+ ... ]
+ """
+ msg = self.format(record)
+ return (record.created, msg)
+
+ def post(self, *args, **kwargs):
+ """
+ Convenience method for subclasses to use when making POST requests.
+ """
+
+ return self.http_handler.post(*args, **kwargs)
+
+ def _append_logs(self, log_lines):
+ raise NotImplementedError("_append_logs must be implemented by _BaseBuildloggerHandler"
+ " subclasses")
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed to the buildlogger
+ server.
+
+ If _append_logs() returns false, then the log messages are added
+ to a separate buffer and retried the next time flush() is
+ called.
+ """
+
+ self.retry_buffer.extend(self.buffer)
+
+ if self._append_logs(self.retry_buffer):
+ self.retry_buffer = []
+ elif close_called:
+ # Request to the buildlogger server returned an error, so use the fallback logger to
+ # avoid losing the log messages entirely.
+ for (_, message) in self.retry_buffer:
+ # TODO: construct a LogRecord instance equivalent to the one passed to the
+ # process_record() method if we ever decide to log the time when the
+ # LogRecord was created, e.g. using %(asctime)s in
+ # _fallback_buildlogger_handler().
+ loggers._BUILDLOGGER_FALLBACK.info(message)
+ self.retry_buffer = []
+
+ self.buffer = []
+
+
+class BuildloggerTestHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the test logs.
+ """
+
+ def __init__(self, build_id, build_config, test_id, **kwargs):
+ """
+ Initializes the buildlogger handler with the build id, test id,
+ and credentials.
+ """
+
+ _BaseBuildloggerHandler.__init__(self, build_id, build_config, **kwargs)
+
+ self.test_id = test_id
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ logs that have been captured.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
+
+ @_log_on_error
+ def _finish_test(self, failed=False):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ test status.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ self.post(endpoint, headers={
+ "X-Sendlogs-Test-Done": "true",
+ "X-Sendlogs-Test-Failed": "true" if failed else "false",
+ })
+
+ def close(self):
+ """
+ Closes the buildlogger handler.
+ """
+
+ _BaseBuildloggerHandler.close(self)
+
+ # TODO: pass the test status (success/failure) to this method
+ self._finish_test()
+
+
+class BuildloggerGlobalHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the global logs.
+ """
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_GLOBAL_LOGS_ENDPOINT with
+ the logs that have been captured.
+ """
+ endpoint = APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": self.build_id}
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/config.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/config.py
new file mode 100644
index 00000000000..c3960bbafd3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/config.py
@@ -0,0 +1,161 @@
+"""
+Configuration functions for the logging package.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+
+from . import buildlogger
+from . import formatters
+from . import loggers
+
+
+_DEFAULT_FORMAT = "[%(name)s] %(message)s"
+
+
+def using_buildlogger(logging_config):
+ """
+ Returns true if buildlogger is set as a handler on the "fixture" or
+ "tests" loggers, and false otherwise.
+ """
+ for logger_name in (loggers.FIXTURE_LOGGER_NAME, loggers.TESTS_LOGGER_NAME):
+ logger_info = logging_config[logger_name]
+ if _get_buildlogger_handler_info(logger_info) is not None:
+ return True
+ return False
+
+
+def apply_config(logging_config):
+ """
+ Adds all handlers specified by the configuration to the "executor",
+ "fixture", and "tests" loggers.
+ """
+
+ logging_components = (loggers.EXECUTOR_LOGGER_NAME,
+ loggers.FIXTURE_LOGGER_NAME,
+ loggers.TESTS_LOGGER_NAME)
+
+ if not all(component in logging_config for component in logging_components):
+ raise ValueError("Logging configuration should contain %s, %s, and %s components"
+ % logging_components)
+
+ # Configure the executor, fixture, and tests loggers.
+ for component in logging_components:
+ logger = loggers.LOGGERS_BY_NAME[component]
+ logger_info = logging_config[component]
+ _configure_logger(logger, logger_info)
+
+ # Configure the buildlogger logger.
+ loggers._BUILDLOGGER_FALLBACK.addHandler(_fallback_buildlogger_handler())
+
+
+def apply_buildlogger_global_handler(logger, logging_config, build_id=None, build_config=None):
+ """
+ Adds a buildlogger.BuildloggerGlobalHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.FIXTURE_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerGlobalHandler(build_id,
+ build_config,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def apply_buildlogger_test_handler(logger,
+ logging_config,
+ build_id=None,
+ build_config=None,
+ test_id=None):
+ """
+ Adds a buildlogger.BuildloggerTestHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.TESTS_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config, test_id)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerTestHandler(build_id,
+ build_config,
+ test_id,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def _configure_logger(logger, logger_info):
+ """
+ Adds the handlers specified by the configuration to 'logger'.
+ """
+
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ for handler_info in logger_info.get("handlers", []):
+ handler_class = handler_info["class"]
+ if handler_class == "logging.FileHandler":
+ handler = logging.FileHandler(filename=handler_info["filename"],
+ mode=handler_info.get("mode", "w"))
+ elif handler_class == "logging.NullHandler":
+ handler = logging.NullHandler()
+ elif handler_class == "logging.StreamHandler":
+ handler = logging.StreamHandler(sys.stdout)
+ elif handler_class == "buildlogger":
+ continue # Buildlogger handlers are applied when running tests.
+ else:
+ raise ValueError("Unknown handler class '%s'" % (handler_class))
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+
+def _fallback_buildlogger_handler():
+ """
+ Returns a handler that writes to stderr.
+ """
+
+ log_format = "[buildlogger:%(name)s] %(message)s"
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(formatter)
+
+ return handler
+
+def _get_buildlogger_handler_info(logger_info):
+ """
+ Returns the buildlogger handler information if it exists, and None
+ otherwise.
+ """
+
+ for handler_info in logger_info["handlers"]:
+ handler_info = handler_info.copy()
+ if handler_info.pop("class") == "buildlogger":
+ return handler_info
+ return None
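+
+# For reference, a 'logger_info' dict that would match above is assumed to look
+# something like (shape inferred from the lookups in this module):
+#   {"format": "[%(name)s] %(message)s",
+#    "handlers": [{"class": "buildlogger"}]}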
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/flush.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/flush.py
new file mode 100644
index 00000000000..c45533f1e13
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/flush.py
@@ -0,0 +1,97 @@
+"""
+Workaround to avoid having too many threads running on 32-bit systems when
+logging to buildlogger, while still allowing messages to be flushed
+periodically to the buildlogger server.
+
+This is needed because a utils.timer.AlarmClock instance is used for each
+buildlogger.BuildloggerTestHandler, but is only dismiss()ed when the Python
+process is about to exit.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from ..utils import queue
+
+
+_LOGGER_QUEUE = queue.Queue()
+
+_FLUSH_THREAD_LOCK = threading.Lock()
+_FLUSH_THREAD = None
+
+
+def start_thread():
+ """
+ Starts the flush thread.
+ """
+
+ global _FLUSH_THREAD
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is not None:
+ raise ValueError("FlushThread has already been started")
+
+ _FLUSH_THREAD = _FlushThread()
+ _FLUSH_THREAD.start()
+
+
+def stop_thread():
+ """
+ Signals the flush thread to stop and waits until it does.
+ """
+
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is None:
+ raise ValueError("FlushThread hasn't been started")
+
+ # Add sentinel value to indicate when there are no more loggers to process.
+ _LOGGER_QUEUE.put(None)
+ _FLUSH_THREAD.join()
+
+
+def close_later(logger):
+ """
+ Adds 'logger' to the queue so that it is closed later by the flush
+ thread.
+ """
+ _LOGGER_QUEUE.put(logger)
+
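+# Typical lifecycle (illustrative sketch; 'test_logger' is hypothetical): the
+# main thread starts the single flush thread once, hands loggers off to it as
+# tests finish, and stops it during shutdown.
+#   start_thread()
+#   close_later(test_logger)
+#   stop_thread()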
+
+class _FlushThread(threading.Thread):
+ """
+ Asynchronously flushes and closes logging handlers.
+ """
+
+ def __init__(self):
+ """
+ Initializes the flush thread.
+ """
+
+ threading.Thread.__init__(self, name="FlushThread")
+ # Do not wait to flush the logs if interrupted by the user.
+ self.daemon = True
+
+ def run(self):
+ """
+ Continuously shuts down loggers from the queue.
+ """
+
+ while True:
+ logger = _LOGGER_QUEUE.get()
+ try:
+ if logger is None:
+ # Sentinel value received, so exit.
+ break
+ _FlushThread._shutdown_logger(logger)
+ finally:
+ _LOGGER_QUEUE.task_done()
+
+ @staticmethod
+ def _shutdown_logger(logger):
+ """
+ Flushes and closes all handlers of 'logger'.
+ """
+
+ for handler in logger.handlers:
+ handler.flush()
+ handler.close()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
new file mode 100644
index 00000000000..4cc36da32d4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
@@ -0,0 +1,50 @@
+"""
+Custom formatters for the logging handlers.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+import time
+
+
+class ISO8601Formatter(logging.Formatter):
+ """
+ An ISO 8601 compliant formatter for log messages. It formats the
+ timezone as an hour/minute offset and uses a period as the
+ millisecond separator in order to match the log messages of MongoDB.
+ """
+
+ def formatTime(self, record, datefmt=None):
+ converted_time = self.converter(record.created)
+
+ if datefmt is not None:
+ return time.strftime(datefmt, converted_time)
+
+ formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", converted_time)
+ timezone = ISO8601Formatter._format_timezone_offset(converted_time)
+ return "%s.%03d%s" % (formatted_time, record.msecs, timezone)
+
+ @staticmethod
+ def _format_timezone_offset(converted_time):
+ """
+ Returns the timezone as an hour/minute offset in the form
+ "+HHMM" or "-HHMM".
+ """
+
+ # Windows treats %z in the format string as %Z, so we compute the hour/minute offset
+ # manually.
+ if converted_time.tm_isdst == 1 and time.daylight:
+ utc_offset_secs = time.altzone
+ else:
+ utc_offset_secs = time.timezone
+
+ # The offset is positive if the local timezone is behind (west of) UTC, and negative if it
+ # is ahead (east of) UTC.
+ utc_offset_prefix = "-" if utc_offset_secs > 0 else "+"
+ utc_offset_secs = abs(utc_offset_secs)
+
+ utc_offset_mins = (utc_offset_secs / 60) % 60
+ utc_offset_hours = utc_offset_secs / 3600
+ return "%s%02d%02d" % (utc_offset_prefix, utc_offset_hours, utc_offset_mins)
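+
+# A sketch of the resulting timestamp (illustrative; the offset depends on the
+# local timezone): a record created in a UTC-4 timezone would be rendered as
+#   2015-05-04T09:57:21.532-0400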
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
new file mode 100644
index 00000000000..b688a1da68a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
@@ -0,0 +1,178 @@
+"""
+Additional handlers that are used as the base classes of the buildlogger
+handler.
+"""
+
+from __future__ import absolute_import
+
+import json
+import logging
+import threading
+import urllib2
+
+from .. import utils
+from ..utils import timer
+
+_TIMEOUT_SECS = 10
+
+class BufferedHandler(logging.Handler):
+ """
+ A handler class that buffers logging records in memory. Whenever
+ each record is added to the buffer, a check is made to see if the
+ buffer should be flushed. If it should, then flush() is expected to
+ do what's needed.
+ """
+
+ def __init__(self, capacity, interval_secs):
+ """
+ Initializes the handler with the buffer size and timeout after
+ which the buffer is flushed regardless.
+ """
+
+ logging.Handler.__init__(self)
+
+ if not isinstance(capacity, int):
+ raise TypeError("capacity must be an integer")
+ elif capacity <= 0:
+ raise ValueError("capacity must be a positive integer")
+
+ if not isinstance(interval_secs, (int, float)):
+ raise TypeError("interval_secs must be a number")
+ elif interval_secs <= 0.0:
+ raise ValueError("interval_secs must be a positive number")
+
+ self.capacity = capacity
+ self.interval_secs = interval_secs
+ self.buffer = []
+
+ self._lock = threading.Lock()
+ self._timer = None # Defer creation until we actually begin to log messages.
+
+ def _new_timer(self):
+ """
+ Returns a new timer.AlarmClock instance that will call the
+ flush() method after 'interval_secs' seconds.
+ """
+
+ return timer.AlarmClock(self.interval_secs, self.flush, args=[self])
+
+ def process_record(self, record):
+ """
+ Applies a transformation to the record before it gets added to
+ the buffer.
+
+ The default implementation returns 'record' unmodified.
+ """
+
+ return record
+
+ def emit(self, record):
+ """
+ Emits a record.
+
+ Append the record to the buffer after it has been transformed by
+ process_record(). If the length of the buffer is greater than or
+ equal to its capacity, then flush() is called to process the
+ buffer.
+
+ After flushing the buffer, the timer is restarted so that it
+ will expire after another 'interval_secs' seconds.
+ """
+
+ with self._lock:
+ self.buffer.append(self.process_record(record))
+ if len(self.buffer) >= self.capacity:
+ if self._timer is not None:
+ self._timer.snooze()
+ self.flush_with_lock(False)
+ if self._timer is not None:
+ self._timer.reset()
+
+ if self._timer is None:
+ self._timer = self._new_timer()
+ self._timer.start()
+
+ def flush(self, close_called=False):
+ """
+ Ensures all logging output has been flushed.
+ """
+
+ with self._lock:
+ if self.buffer:
+ self.flush_with_lock(close_called)
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed.
+
+ This version resets the buffers back to an empty list and is
+ intended to be overridden by subclasses.
+ """
+
+ self.buffer = []
+
+ def close(self):
+ """
+ Tidies up any resources used by the handler.
+
+ Stops the timer and flushes the buffer.
+ """
+
+ if self._timer is not None:
+ self._timer.dismiss()
+ self.flush(close_called=True)
+
+ logging.Handler.close(self)
+
+
+class HTTPHandler(object):
+ """
+ A class which sends data to a web server using POST requests.
+ """
+
+ def __init__(self, realm, url_root, username, password):
+ """
+ Initializes the handler with the necessary authentication
+ credentials.
+ """
+
+ digest_handler = urllib2.HTTPDigestAuthHandler()
+ digest_handler.add_password(
+ realm=realm,
+ uri=url_root,
+ user=username,
+ passwd=password)
+
+ self.url_root = url_root
+ self.url_opener = urllib2.build_opener(digest_handler, urllib2.HTTPErrorProcessor())
+
+ def _make_url(self, endpoint):
+ return "%s/%s/" % (self.url_root.rstrip("/"), endpoint.strip("/"))
+
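+ # A doctest-style sketch of the URL joining above ('handler' being a
+ # hypothetical HTTPHandler constructed with url_root="http://localhost:8080/"):
+ #   >>> handler._make_url("/build/1234")
+ #   'http://localhost:8080/build/1234/'
+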
+ def post(self, endpoint, data=None, headers=None, timeout_secs=_TIMEOUT_SECS):
+ """
+ Sends a POST request to the specified endpoint with the supplied
+ data.
+
+ Returns the response, either as a string or a JSON object based
+ on the content type.
+ """
+
+ data = utils.default_if_none(data, [])
+ data = json.dumps(data, encoding="utf-8")
+
+ headers = utils.default_if_none(headers, {})
+ headers["Content-Type"] = "application/json; charset=utf-8"
+
+ url = self._make_url(endpoint)
+ request = urllib2.Request(url=url, data=data, headers=headers)
+
+ response = self.url_opener.open(request, timeout=timeout_secs)
+ headers = response.info()
+
+ content_type = headers.gettype()
+ if content_type == "application/json":
+ encoding = headers.getparam("charset") or "utf-8"
+ return json.load(response, encoding=encoding)
+
+ return response.read()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
new file mode 100644
index 00000000000..35f41512425
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
@@ -0,0 +1,37 @@
+"""
+Module to hold the logger instances themselves.
+"""
+
+from __future__ import absolute_import
+
+import logging
+
+EXECUTOR_LOGGER_NAME = "executor"
+FIXTURE_LOGGER_NAME = "fixture"
+TESTS_LOGGER_NAME = "tests"
+
+def new_logger(logger_name, parent=None):
+ """
+ Returns a new logging.Logger instance with the specified name.
+ """
+
+ # Set up the logger to handle all messages it receives.
+ logger = logging.Logger(logger_name, level=logging.DEBUG)
+
+ if parent is not None:
+ logger.parent = parent
+ logger.propagate = True
+
+ return logger
+
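+# A sketch of the parent/child wiring above (illustrative only): a per-job
+# logger whose records propagate up to the shared FIXTURE logger defined below.
+#   job_logger = new_logger("fixture:job0", parent=FIXTURE)
+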
+EXECUTOR = new_logger(EXECUTOR_LOGGER_NAME)
+FIXTURE = new_logger(FIXTURE_LOGGER_NAME)
+TESTS = new_logger(TESTS_LOGGER_NAME)
+
+LOGGERS_BY_NAME = {
+ EXECUTOR_LOGGER_NAME: EXECUTOR,
+ FIXTURE_LOGGER_NAME: FIXTURE,
+ TESTS_LOGGER_NAME: TESTS,
+}
+
+_BUILDLOGGER_FALLBACK = new_logger("fallback")
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/parser.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/parser.py
new file mode 100644
index 00000000000..4bcc7bfb137
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/parser.py
@@ -0,0 +1,368 @@
+"""
+Parser for command line arguments.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import optparse
+
+from . import config as _config
+from . import testing
+from . import utils
+from .. import resmokeconfig
+
+
+# Mapping of the attribute of the parsed arguments (dest) to its key as it appears in the options
+# YAML configuration file. Most entries simply convert from snake_case to camelCase.
+DEST_TO_CONFIG = {
+ "base_port": "basePort",
+ "buildlogger_url": "buildloggerUrl",
+ "continue_on_failure": "continueOnFailure",
+ "dbpath_prefix": "dbpathPrefix",
+ "dbtest_executable": "dbtest",
+ "dry_run": "dryRun",
+ "exclude_with_all_tags": "excludeWithAllTags",
+ "exclude_with_any_tags": "excludeWithAnyTags",
+ "include_with_all_tags": "includeWithAllTags",
+ "include_with_any_tags": "includeWithAnyTags",
+ "jobs": "jobs",
+ "mongo_executable": "mongo",
+ "mongod_executable": "mongod",
+ "mongod_parameters": "mongodSetParameters",
+ "mongos_executable": "mongos",
+ "mongos_parameters": "mongosSetParameters",
+ "no_journal": "nojournal",
+ "prealloc_journal": "preallocJournal",
+ "repeat": "repeat",
+ "report_file": "reportFile",
+ "seed": "seed",
+ "shell_read_mode": "shellReadMode",
+ "shell_write_mode": "shellWriteMode",
+ "shuffle": "shuffle",
+ "storage_engine": "storageEngine",
+ "wt_coll_config": "wiredTigerCollectionConfigString",
+ "wt_engine_config": "wiredTigerEngineConfigString",
+ "wt_index_config": "wiredTigerIndexConfigString"
+}
+
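+# For example (illustrative), --basePort 20000 on the command line populates the
+# "base_port" dest, which maps to the "basePort" key that may also appear in the
+# --options YAML file; in update_config_vars() the command line value wins.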
+
+def parse_command_line():
+ """
+ Parses the command line arguments passed to resmoke.py.
+ """
+
+ parser = optparse.OptionParser()
+
+ parser.add_option("--suites", dest="suite_files", metavar="SUITE1,SUITE2",
+ help=("Comma separated list of YAML files that each specify the configuration"
+ " of a suite. If the file is located in the resmokeconfig/suites/"
+ " directory, then the basename without the .yml extension can be"
+ " specified, e.g. 'core'."))
+
+ parser.add_option("--executor", dest="executor_file", metavar="EXECUTOR",
+ help=("A YAML file that specifies the executor configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'core_small_oplog'."
+ " If specified in combination with the --suites option, then the suite"
+ " configuration takes precedence."))
+
+ parser.add_option("--log", dest="logger_file", metavar="LOGGER",
+ help=("A YAML file that specifies the logging configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'console'."))
+
+ parser.add_option("--options", dest="options_file", metavar="OPTIONS",
+ help="A YAML file that specifies global options to resmoke.py.")
+
+ parser.add_option("--basePort", dest="base_port", metavar="PORT",
+ help=("The starting port number to use for mongod and mongos processes"
+ " spawned by resmoke.py or the tests themselves. Each fixture and Job"
+ " allocates a contiguous range of ports."))
+
+ parser.add_option("--buildloggerUrl", action="store", dest="buildlogger_url", metavar="URL",
+ help="The root url of the buildlogger server.")
+
+ parser.add_option("--continueOnFailure", action="store_true", dest="continue_on_failure",
+ help="Executes all tests in all suites, even if some of them fail.")
+
+ parser.add_option("--dbpathPrefix", dest="dbpath_prefix", metavar="PATH",
+ help=("The directory which will contain the dbpaths of any mongod's started"
+ " by resmoke.py or the tests themselves."))
+
+ parser.add_option("--dbtest", dest="dbtest_executable", metavar="PATH",
+ help="The path to the dbtest executable for resmoke to use.")
+
+ parser.add_option("--excludeWithAllTags", dest="exclude_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains all of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--excludeWithAnyTags", dest="exclude_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains any of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--includeWithAllTags", dest="include_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have all of the specified tags will be run."))
+
+ parser.add_option("--includeWithAnyTags", dest="include_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have at least one of the specified tags will be"
+ " run."))
+
+ parser.add_option("-n", action="store_const", const="tests", dest="dry_run",
+ help=("Output the tests that would be run."))
+
+ # TODO: add support for --dryRun=commands
+ parser.add_option("--dryRun", type="choice", action="store", dest="dry_run",
+ choices=("off", "tests"), metavar="MODE",
+ help=("Instead of running the tests, output the tests that would be run"
+ " (if MODE=tests). Defaults to MODE=%default."))
+
+ parser.add_option("-j", "--jobs", type="int", dest="jobs", metavar="JOBS",
+ help=("The number of Job instances to use. Each instance will receive its own"
+ " MongoDB deployment to dispatch tests to."))
+
+ parser.add_option("-l", "--listSuites", action="store_true", dest="list_suites",
+ help="List the names of the suites available to execute.")
+
+ parser.add_option("--mongo", dest="mongo_executable", metavar="PATH",
+ help="The path to the mongo shell executable for resmoke.py to use.")
+
+ parser.add_option("--mongod", dest="mongod_executable", metavar="PATH",
+ help="The path to the mongod executable for resmoke.py to use.")
+
+ parser.add_option("--mongodSetParameters", dest="mongod_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongod processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--mongos", dest="mongos_executable", metavar="PATH",
+ help="The path to the mongos executable for resmoke.py to use.")
+
+ parser.add_option("--mongosSetParameters", dest="mongos_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongos processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--nojournal", action="store_true", dest="no_journal",
+ help="Disable journaling for all mongod's.")
+
+ parser.add_option("--nopreallocj", action="store_const", const="off", dest="prealloc_journal",
+ help="Disable preallocation of journal files for all mongod processes.")
+
+ parser.add_option("--preallocJournal", type="choice", action="store", dest="prealloc_journal",
+ choices=("on", "off"), metavar="ON|OFF",
+ help=("Enable or disable preallocation of journal files for all mongod"
+ " processes. Defaults to %default."))
+
+ parser.add_option("--repeat", type="int", dest="repeat", metavar="N",
+ help="Repeat the given suite(s) N times, or until one fails.")
+
+ parser.add_option("--reportFile", dest="report_file", metavar="REPORT",
+ help="Write a JSON file with test status and timing information.")
+
+ parser.add_option("--seed", type="int", dest="seed", metavar="SEED",
+ help=("Seed for the random number generator. Useful in combination with the"
+ " --shuffle option for producing a consistent test execution order."))
+
+ parser.add_option("--shellReadMode", type="choice", action="store", dest="shell_read_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="READ_MODE",
+ help="The read mode used by the mongo shell.")
+
+ parser.add_option("--shellWriteMode", type="choice", action="store", dest="shell_write_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="WRITE_MODE",
+ help="The write mode used by the mongo shell.")
+
+ parser.add_option("--shuffle", action="store_true", dest="shuffle",
+ help="Randomize the order in which tests are executed.")
+
+ parser.add_option("--storageEngine", dest="storage_engine", metavar="ENGINE",
+ help="The storage engine used by dbtests and jstests.")
+
+ parser.add_option("--wiredTigerCollectionConfigString", dest="wt_coll_config", metavar="CONFIG",
+ help="Set the WiredTiger collection configuration setting for all mongod's.")
+
+ parser.add_option("--wiredTigerEngineConfigString", dest="wt_engine_config", metavar="CONFIG",
+ help="Set the WiredTiger engine configuration setting for all mongod's.")
+
+ parser.add_option("--wiredTigerIndexConfigString", dest="wt_index_config", metavar="CONFIG",
+ help="Set the WiredTiger index configuration setting for all mongod's.")
+
+ parser.set_defaults(executor_file="with_server",
+ logger_file="console",
+ dry_run="off",
+ list_suites=False,
+ prealloc_journal="off")
+
+ return parser.parse_args()
+
+
+def get_logging_config(values):
+ return _get_logging_config(values.logger_file)
+
+
+def update_config_vars(values):
+ options = _get_options_config(values.options_file)
+
+ config = _config.DEFAULTS.copy()
+ config.update(options)
+
+ values = vars(values)
+ for dest in values:
+ if dest not in DEST_TO_CONFIG:
+ continue
+ config_var = DEST_TO_CONFIG[dest]
+ if values[dest] is not None:
+ config[config_var] = values[dest]
+
+ _config.BASE_PORT = int(config.pop("basePort"))
+ _config.BUILDLOGGER_URL = config.pop("buildloggerUrl")
+ _config.DBPATH_PREFIX = _expand_user(config.pop("dbpathPrefix"))
+ _config.DBTEST_EXECUTABLE = _expand_user(config.pop("dbtest"))
+ _config.DRY_RUN = config.pop("dryRun")
+ _config.EXCLUDE_WITH_ALL_TAGS = config.pop("excludeWithAllTags")
+ _config.EXCLUDE_WITH_ANY_TAGS = config.pop("excludeWithAnyTags")
+ _config.FAIL_FAST = not config.pop("continueOnFailure")
+ _config.INCLUDE_WITH_ALL_TAGS = config.pop("includeWithAllTags")
+ _config.INCLUDE_WITH_ANY_TAGS = config.pop("includeWithAnyTags")
+ _config.JOBS = config.pop("jobs")
+ _config.MONGO_EXECUTABLE = _expand_user(config.pop("mongo"))
+ _config.MONGOD_EXECUTABLE = _expand_user(config.pop("mongod"))
+ _config.MONGOD_SET_PARAMETERS = config.pop("mongodSetParameters")
+ _config.MONGOS_EXECUTABLE = _expand_user(config.pop("mongos"))
+ _config.MONGOS_SET_PARAMETERS = config.pop("mongosSetParameters")
+ _config.NO_JOURNAL = config.pop("nojournal")
+ _config.NO_PREALLOC_JOURNAL = config.pop("preallocJournal") == "off"
+ _config.RANDOM_SEED = config.pop("seed")
+ _config.REPEAT = config.pop("repeat")
+ _config.REPORT_FILE = config.pop("reportFile")
+ _config.SHELL_READ_MODE = config.pop("shellReadMode")
+ _config.SHELL_WRITE_MODE = config.pop("shellWriteMode")
+ _config.SHUFFLE = config.pop("shuffle")
+ _config.STORAGE_ENGINE = config.pop("storageEngine")
+ _config.WT_COLL_CONFIG = config.pop("wiredTigerCollectionConfigString")
+ _config.WT_ENGINE_CONFIG = config.pop("wiredTigerEngineConfigString")
+ _config.WT_INDEX_CONFIG = config.pop("wiredTigerIndexConfigString")
+
+ if config:
+ raise optparse.OptionValueError("Unknown option(s): %s" % (config.keys()))
+
+
+def get_suites(values, args):
+ if (values.suite_files is None and not args) or (values.suite_files is not None and args):
+ raise optparse.OptionValueError("Must specify either --suites or a list of tests")
+
+ # If there are no suites specified, but there are args, assume they are jstests.
+ if args:
+ # No suite config was specified, so build one from the JS files and use the default logging and executor.
+ suite_config = _make_jstests_config(args)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite("<jstests>", suite_config)
+ return [suite]
+
+ suite_files = values.suite_files.split(",")
+
+ suites = []
+ for suite_filename in suite_files:
+ suite_config = _get_suite_config(suite_filename)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite(suite_filename, suite_config)
+ suites.append(suite)
+ return suites
+
+
+def get_named_suites():
+ """
+ Returns the list of suites available to execute.
+ """
+
+ # Skip "with_server" and "no_server" because they do not define any test files to run.
+ executor_only = set(["with_server", "no_server"])
+ suite_names = [suite for suite in resmokeconfig.NAMED_SUITES if suite not in executor_only]
+ suite_names.sort()
+ return suite_names
+
+
+def _get_logging_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ how resmoke.py should log the tests and fixtures.
+ """
+
+ # Named loggers are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_LOGGERS:
+ raise optparse.OptionValueError("Unknown logger '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_LOGGERS[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a logger YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname).pop("logging")
+
+
+def _get_options_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ any modifications to global options.
+ """
+
+ if pathname is None:
+ return {}
+
+ return utils.load_yaml_file(pathname).pop("options")
+
+
+def _get_suite_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ what tests to run and how to run them.
+ """
+
+ # Named suites are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown suite '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_SUITES[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a suite YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname)
+
+
+def _make_jstests_config(js_files):
+ for pathname in js_files:
+ if not utils.is_js_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a list of JS files, but got '%s'"
+ % (pathname))
+
+ return {"selector": {"js_test": {"roots": js_files}}}
+
+
+def _ensure_executor(suite_config, executor_pathname):
+ if "executor" not in suite_config:
+ # Named executors are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(executor_pathname) and not os.path.dirname(executor_pathname):
+ if executor_pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown executor '%s'" % (executor_pathname))
+ executor_pathname = resmokeconfig.NAMED_SUITES[executor_pathname]
+
+ if not utils.is_yaml_file(executor_pathname) or not os.path.isfile(executor_pathname):
+ raise optparse.OptionValueError("Expected an executor YAML config, but got '%s'"
+ % (executor_pathname))
+
+ suite_config["executor"] = utils.load_yaml_file(executor_pathname).pop("executor")
+
+
+def _expand_user(pathname):
+ """
+ Wrapper around os.path.expanduser() to do nothing when given None.
+ """
+ if pathname is None:
+ return None
+ return os.path.expanduser(pathname)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/selector.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/selector.py
new file mode 100644
index 00000000000..c2dc0fca41b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/selector.py
@@ -0,0 +1,291 @@
+"""
+Test selection utility.
+
+Defines filtering rules for what tests to include in a suite depending
+on whether they apply to C++ unit tests, dbtests, or JS tests.
+"""
+
+from __future__ import absolute_import
+
+import fnmatch
+import os.path
+import subprocess
+import sys
+
+from . import config
+from . import errors
+from . import utils
+from .utils import globstar
+from .utils import jscomment
+
+def _filter_cpp_tests(kind, root, include_files, exclude_files):
+ """
+ Generic filtering logic for C++ tests that are sourced from a list
+ of test executables.
+ """
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ tests = []
+ with open(root, "r") as fp:
+ for test_path in fp:
+ test_path = test_path.rstrip()
+ tests.append(test_path)
+
+ (remaining, included, _) = _filter_by_filename(kind,
+ tests,
+ include_files,
+ exclude_files)
+
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return tests
+
+def filter_cpp_unit_tests(root="build/unittests.txt", include_files=None, exclude_files=None):
+ """
+ Filters out what C++ unit tests to run.
+ """
+ return _filter_cpp_tests("C++ unit test", root, include_files, exclude_files)
+
+
+def filter_cpp_integration_tests(root="build/integration_tests.txt",
+ include_files=None,
+ exclude_files=None):
+ """
+ Filters out what C++ integration tests to run.
+ """
+ return _filter_cpp_tests("C++ integration test", root, include_files, exclude_files)
+
+
+def filter_dbtests(binary=None, include_suites=None):
+ """
+ Filters out what dbtests to run.
+ """
+
+ # Command line option overrides the YAML configuration.
+ binary = utils.default_if_none(config.DBTEST_EXECUTABLE, binary)
+ # Use the default if nothing specified.
+ binary = utils.default_if_none(binary, config.DEFAULT_DBTEST_EXECUTABLE)
+
+ include_suites = utils.default_if_none(include_suites, [])
+
+ if not utils.is_string_list(include_suites):
+ raise TypeError("include_suites must be a list of strings")
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(binary)[1] != ".exe":
+ binary += ".exe"
+
+ program = subprocess.Popen([binary, "--list"], stdout=subprocess.PIPE)
+ stdout = program.communicate()[0]
+
+ if program.returncode != 0:
+ raise errors.ResmokeError("Getting list of dbtest suites failed")
+
+ dbtests = stdout.splitlines()
+
+ if not include_suites:
+ return dbtests
+
+ dbtests = set(dbtests)
+
+ (verbatim, globbed) = _partition(include_suites, normpath=False)
+ included = _pop_all("dbtest suite", dbtests, verbatim)
+
+ for suite_pattern in globbed:
+ for suite_name in dbtests:
+ if fnmatch.fnmatchcase(suite_name, suite_pattern):
+ included.add(suite_name)
+
+ return list(included)
+
+
+def filter_jstests(roots,
+ include_files=None,
+ include_with_all_tags=None,
+ include_with_any_tags=None,
+ exclude_files=None,
+ exclude_with_all_tags=None,
+ exclude_with_any_tags=None):
+ """
+ Filters out what jstests to run.
+ """
+
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ # Command line options override the YAML options, and all should be defaulted to an empty list
+ # if not specified.
+ tags = {
+ "exclude_with_all_tags": exclude_with_all_tags,
+ "exclude_with_any_tags": exclude_with_any_tags,
+ "include_with_all_tags": include_with_all_tags,
+ "include_with_any_tags": include_with_any_tags,
+ }
+ cmd_line_values = (
+ ("exclude_with_all_tags", config.EXCLUDE_WITH_ALL_TAGS),
+ ("exclude_with_any_tags", config.EXCLUDE_WITH_ANY_TAGS),
+ ("include_with_all_tags", config.INCLUDE_WITH_ALL_TAGS),
+ ("include_with_any_tags", config.INCLUDE_WITH_ANY_TAGS),
+ )
+ for (tag_category, cmd_line_val) in cmd_line_values:
+ if cmd_line_val is not None:
+ # Ignore the empty string when it is used as a tag. Specifying an empty string on the
+ # command line allows a user to unset the list of tags specified in the YAML
+ # configuration.
+ tags[tag_category] = set([tag for tag in cmd_line_val.split(",") if tag != ""])
+ else:
+ tags[tag_category] = set(utils.default_if_none(tags[tag_category], []))
+
+ using_tags = 0
+ for name in tags:
+ if not utils.is_string_set(tags[name]):
+ raise TypeError("%s must be a list of strings" % (name))
+ if len(tags[name]) > 0:
+ using_tags += 1
+
+ if using_tags > 1:
+ raise ValueError("Can only specify one of 'include_with_all_tags', 'include_with_any_tags',"
+ " 'exclude_with_all_tags', and 'exclude_with_any_tags'. If you wish to"
+ " unset one of these options, use --includeWithAllTags='' or similar")
+
+ jstests = []
+ for root in roots:
+ jstests.extend(globstar.iglob(root))
+
+ (remaining, included, _) = _filter_by_filename("jstest",
+ jstests,
+ include_files,
+ exclude_files)
+
+ # Skip parsing comments if not using tags
+ if not using_tags:
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return jstests
+
+ jstests = set(remaining)
+ excluded = set()
+
+ for filename in jstests:
+ file_tags = set(jscomment.get_tags(filename))
+ if tags["include_with_all_tags"] and not tags["include_with_all_tags"] - file_tags:
+ included.add(filename)
+ elif tags["include_with_any_tags"] and tags["include_with_any_tags"] & file_tags:
+ included.add(filename)
+ elif tags["exclude_with_all_tags"] and not tags["exclude_with_all_tags"] - file_tags:
+ excluded.add(filename)
+ elif tags["exclude_with_any_tags"] and tags["exclude_with_any_tags"] & file_tags:
+ excluded.add(filename)
+
+ if tags["include_with_all_tags"] or tags["include_with_any_tags"]:
+ if exclude_files:
+ return list((included & jstests) - excluded)
+ return list(included)
+ else:
+ if include_files:
+ return list(included | (jstests - excluded))
+ return list(jstests - excluded)
+
+
+def _filter_by_filename(kind, universe, include_files, exclude_files):
+ """
+ Filters out what tests to run solely by filename.
+
+ Returns the triplet (remaining, included, excluded), where
+ 'remaining' is 'universe' after 'included' and 'excluded' were
+ removed from it.
+ """
+
+ if not utils.is_string_list(include_files):
+ raise TypeError("include_files must be a list of strings")
+ elif not utils.is_string_list(exclude_files):
+ raise TypeError("exclude_files must be a list of strings")
+ elif include_files and exclude_files:
+ raise ValueError("Cannot specify both include_files and exclude_files")
+
+ universe = set(universe)
+ if include_files:
+ (verbatim, globbed) = _partition(include_files)
+ # Remove all matching files of 'verbatim' from 'universe'.
+ included_verbatim = _pop_all(kind, universe, verbatim)
+ included_globbed = set()
+
+ for file_pattern in globbed:
+ included_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'included_globbed' from 'universe' without checking whether
+ # the same file was matched multiple times. This implicitly takes an intersection
+ # between 'included_globbed' and 'universe'.
+ included_globbed = _pop_all(kind, universe, included_globbed, validate=False)
+ return (universe, included_verbatim | included_globbed, set())
+
+ elif exclude_files:
+ (verbatim, globbed) = _partition(exclude_files)
+
+ # Remove all matching files of 'verbatim' from 'universe'.
+ excluded_verbatim = _pop_all(kind, universe, verbatim)
+ excluded_globbed = set()
+
+ for file_pattern in globbed:
+ excluded_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'excluded_globbed' from 'universe' without checking whether
+ # the same file was matched multiple times. This implicitly takes an intersection
+ # between 'excluded_globbed' and 'universe'.
+ excluded_globbed = _pop_all(kind, universe, excluded_globbed, validate=False)
+ return (universe, set(), excluded_verbatim | excluded_globbed)
+
+ return (universe, set(), set())
+
+
+def _partition(pathnames, normpath=True):
+ """
+ Splits 'pathnames' into two separate lists based on whether they
+ use a glob pattern.
+
+ Returns the pair (non-globbed pathnames, globbed pathnames).
+ """
+
+ verbatim = []
+ globbed = []
+
+ for pathname in pathnames:
+ if globstar.is_glob_pattern(pathname):
+ globbed.append(pathname)
+ continue
+
+ # Normalize 'pathname' so exact string comparison can be used later.
+ if normpath:
+ pathname = os.path.normpath(pathname)
+ verbatim.append(pathname)
+
+ return (verbatim, globbed)
+
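+# A doctest-style sketch of the split above (illustrative, assuming '*' is
+# treated as a glob metacharacter by globstar.is_glob_pattern()):
+#   >>> _partition(["jstests/core/all.js", "jstests/core/*.js"])
+#   (['jstests/core/all.js'], ['jstests/core/*.js'])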
+
+def _pop_all(kind, universe, iterable, validate=True):
+ """
+ Removes all elements of 'iterable' from 'universe' and returns them.
+
+ If 'validate' is true, then a ValueError is raised if an element
+ would be removed multiple times, or if an element of 'iterable' does
+ not appear in 'universe' at all.
+ """
+
+ members = set()
+
+ for elem in iterable:
+ if validate and elem in members:
+ raise ValueError("%s '%s' specified multiple times" % (kind, elem))
+
+ if elem in universe:
+ universe.remove(elem)
+ members.add(elem)
+ elif validate:
+ raise ValueError("Unrecognized %s '%s'" % (kind, elem))
+
+ return members
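+
+# A doctest-style sketch of the removal above (illustrative only):
+#   >>> universe = set(["a.js", "b.js", "c.js"])
+#   >>> sorted(_pop_all("jstest", universe, ["a.js", "b.js"]))
+#   ['a.js', 'b.js']
+#   >>> universe
+#   set(['c.js'])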
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
new file mode 100644
index 00000000000..e4acff00521
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
@@ -0,0 +1,9 @@
+"""
+Extension to the unittest package to support buildlogger and parallel
+test execution.
+"""
+
+from __future__ import absolute_import
+
+from . import executor
+from . import suite
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/executor.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/executor.py
new file mode 100644
index 00000000000..5d79abd6ac6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/executor.py
@@ -0,0 +1,307 @@
+"""
+Driver of the test execution framework.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from . import fixtures
+from . import hooks as _hooks
+from . import job as _job
+from . import report as _report
+from . import testcases
+from .. import config as _config
+from .. import errors
+from .. import logging
+from .. import utils
+from ..utils import queue as _queue
+
+
+class TestGroupExecutor(object):
+ """
+ Executes a test group.
+
+ Responsible for setting up and tearing down the fixtures that the
+ tests execute against.
+ """
+
+ _TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
+
+ def __init__(self,
+ exec_logger,
+ test_group,
+ logging_config,
+ config=None,
+ fixture=None,
+ hooks=None):
+ """
+ Initializes the TestGroupExecutor with the test group to run.
+ """
+
+ # Build a logger for executing this group of tests.
+ logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
+ self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)
+
+ self.logging_config = logging_config
+ self.fixture_config = fixture
+ self.hooks_config = utils.default_if_none(hooks, [])
+ self.test_config = utils.default_if_none(config, {})
+
+ self._test_group = test_group
+
+ self._using_buildlogger = logging.config.using_buildlogger(logging_config)
+ self._build_config = None
+
+ if self._using_buildlogger:
+ self._build_config = logging.buildlogger.get_config()
+
+ # Must be done after getting buildlogger configuration.
+ self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]
+
+ def run(self):
+ """
+ Executes the test group.
+
+ Any exceptions that occur during setting up or tearing down a
+ fixture are propagated.
+ """
+
+ self.logger.info("Starting execution of %ss...", self._test_group.test_kind)
+
+ return_code = 0
+ try:
+ if not self._setup_fixtures():
+ return_code = 2
+ return
+
+ num_repeats = _config.REPEAT
+ while num_repeats > 0:
+ test_queue = self._make_test_queue()
+ self._test_group.record_start()
+ (report, interrupted) = self._run_tests(test_queue)
+ self._test_group.record_end(report)
+
+ # If the user triggered a KeyboardInterrupt, then we should stop.
+ if interrupted:
+ raise errors.UserInterrupt("Received interrupt from user")
+
+ sb = [] # String builder.
+ self._test_group.summarize_latest(sb)
+ self.logger.info("Summary: %s", "\n ".join(sb))
+
+ if not report.wasSuccessful():
+ return_code = 1
+ if _config.FAIL_FAST:
+ break
+
+ # Clear the report so it can be reused for the next execution.
+ for job in self._jobs:
+ job.report.reset()
+ num_repeats -= 1
+ finally:
+ if not self._teardown_fixtures():
+ return_code = 2
+ self._test_group.return_code = return_code
+
+ def _setup_fixtures(self):
+ """
+ Sets up a fixture for each job.
+ """
+
+ for job in self._jobs:
+ try:
+ job.fixture.setup()
+ except:
+ self.logger.exception("Encountered an error while setting up %s.", job.fixture)
+ return False
+
+ # Once they have all been started, wait for them to become available.
+ for job in self._jobs:
+ try:
+ job.fixture.await_ready()
+ except:
+ self.logger.exception("Encountered an error while waiting for %s to be ready",
+ job.fixture)
+ return False
+
+ return True
+
+ def _run_tests(self, test_queue):
+ """
+ Starts a thread for each Job instance and blocks until all of
+ the tests are run.
+
+ Returns a (combined report, user interrupted) pair, where the
+ report contains the status and timing information of tests run
+ by all of the threads.
+ """
+
+ threads = []
+ interrupt_flag = threading.Event()
+ user_interrupted = False
+ try:
+ # Run each Job instance in its own thread.
+ for job in self._jobs:
+ t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
+ # Do not wait for tests to finish executing if interrupted by the user.
+ t.daemon = True
+ t.start()
+ threads.append(t)
+
+ joined = False
+ while not joined:
+ # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
+ # are propagated.
+ joined = test_queue.join(TestGroupExecutor._TIMEOUT)
+ except (KeyboardInterrupt, SystemExit):
+ interrupt_flag.set()
+ user_interrupted = True
+ else:
+ # Only wait for all the Job instances if not interrupted by the user.
+ for t in threads:
+ t.join()
+
+ reports = [job.report for job in self._jobs]
+ combined_report = _report.TestReport.combine(*reports)
+
+ # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
+ # instance if a test fails and it decides to drain the queue. We only want to raise a
+ # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
+ return (combined_report, user_interrupted)
+
+ def _teardown_fixtures(self):
+ """
+ Tears down all of the fixtures.
+
+ Returns true if all fixtures were torn down successfully, and
+ false otherwise.
+ """
+
+ success = True
+ for job in self._jobs:
+ try:
+ if not job.fixture.teardown():
+ self.logger.warn("Teardown of %s was not successful.", job.fixture)
+ success = False
+ except:
+ self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
+ success = False
+
+ return success
+
+ def _get_build_id(self, job_num):
+ """
+ Returns a unique build id for a job.
+ """
+
+ build_config = self._build_config
+
+ if self._using_buildlogger:
+ # Use a distinct "builder" for each job in order to separate their logs.
+ if build_config is not None and "builder" in build_config:
+ build_config = build_config.copy()
+ build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)
+
+ build_id = logging.buildlogger.new_build_id(build_config)
+
+ if build_config is None or build_id is None:
+ self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
+ " back to stderr.", job_num)
+
+ return build_id, build_config
+
+ return None, build_config
+
+ def _make_fixture(self, job_num, build_id, build_config):
+ """
+ Creates a fixture for a job.
+ """
+
+ fixture_config = {}
+ fixture_class = fixtures.NOOP_FIXTURE_CLASS
+
+ if self.fixture_config is not None:
+ fixture_config = self.fixture_config.copy()
+ fixture_class = fixture_config.pop("class")
+
+ logger_name = "%s:job%d" % (fixture_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
+ logging.config.apply_buildlogger_global_handler(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)
+
+ def _make_hooks(self, job_num, fixture):
+ """
+ Creates the custom behaviors for the job's fixture.
+ """
+
+ behaviors = []
+
+ for behavior_config in self.hooks_config:
+ behavior_config = behavior_config.copy()
+ behavior_class = behavior_config.pop("class")
+
+ logger_name = "%s:job%d" % (behavior_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+ behavior = _hooks.make_custom_behavior(behavior_class,
+ logger,
+ fixture,
+ **behavior_config)
+ behaviors.append(behavior)
+
+ return behaviors
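+
+    # The hooks configuration is a list of dicts, each naming a behavior class
+    # plus its keyword arguments; an illustrative value:
+    #
+    #   [{"class": "CleanEveryN", "n": 20}, {"class": "CheckReplDBHash"}]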
+
+ def _make_job(self, job_num):
+ """
+ Returns a Job instance with its own fixture, hooks, and test
+ report.
+ """
+
+ build_id, build_config = self._get_build_id(job_num)
+ fixture = self._make_fixture(job_num, build_id, build_config)
+ hooks = self._make_hooks(job_num, fixture)
+
+ logger_name = "%s:job%d" % (self.logger.name, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ if build_id is not None:
+ endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
+ url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
+ logger.info("Writing output of job #%d to %s.", job_num, url)
+
+ report = _report.TestReport(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return _job.Job(logger, fixture, hooks, report)
+
+ def _make_test_queue(self):
+ """
+ Returns a queue of TestCase instances.
+
+ Use a multi-consumer queue instead of a unittest.TestSuite so
+ that the test cases can be dispatched to multiple threads.
+ """
+
+ test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
+ parent=logging.loggers.TESTS)
+
+ # Put all the test cases in a queue.
+ queue = _queue.Queue()
+ for test_name in self._test_group.tests:
+ test_case = testcases.make_test_case(self._test_group.test_kind,
+ test_kind_logger,
+ test_name,
+ **self.test_config)
+ queue.put(test_case)
+
+ # Add sentinel value for each job to indicate when there are no more items to process.
+ for _ in xrange(_config.JOBS):
+ queue.put(None)
+
+ return queue
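+
+    # Each Job thread drains this queue until it sees the sentinel; a minimal
+    # consumer sketch (the real loop lives in the job module):
+    #
+    #   while True:
+    #       test_case = queue.get()
+    #       if test_case is None:
+    #           break  # Sentinel: no more tests to run.
+    #       run_test(test_case)  # Hypothetical helper.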
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
new file mode 100644
index 00000000000..d68a66911d2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
@@ -0,0 +1,32 @@
+"""
+Fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+from .interface import Fixture, ReplFixture
+from .standalone import MongoDFixture
+from .replicaset import ReplicaSetFixture
+from .masterslave import MasterSlaveFixture
+from .shardedcluster import ShardedClusterFixture
+
+
+NOOP_FIXTURE_CLASS = "Fixture"
+
+_FIXTURES = {
+ "Fixture": Fixture,
+ "MongoDFixture": MongoDFixture,
+ "ReplicaSetFixture": ReplicaSetFixture,
+ "MasterSlaveFixture": MasterSlaveFixture,
+ "ShardedClusterFixture": ShardedClusterFixture,
+}
+
+
+def make_fixture(class_name, *args, **kwargs):
+ """
+ Factory function for creating Fixture instances.
+ """
+
+ if class_name not in _FIXTURES:
+ raise ValueError("Unknown fixture class '%s'" % (class_name))
+ return _FIXTURES[class_name](*args, **kwargs)
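+
+
+# Illustrative use of the registry (arguments follow MongoDFixture.__init__ in
+# standalone.py; the option values are hypothetical):
+#
+#   fixture = make_fixture("MongoDFixture", logger, job_num,
+#                          mongod_options={"storageEngine": "wiredTiger"})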
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
new file mode 100644
index 00000000000..5fbf537c107
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -0,0 +1,128 @@
+"""
+Interface of the different fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+import pymongo
+
+from ... import errors
+from ... import logging
+
+
+class Fixture(object):
+ """
+ Base class for all fixtures.
+ """
+
+ def __init__(self, logger, job_num):
+ """
+        Initializes the fixture with a logger instance.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(job_num, int):
+ raise TypeError("job_num must be an integer")
+ elif job_num < 0:
+ raise ValueError("job_num must be a nonnegative integer")
+
+ self.logger = logger
+ self.job_num = job_num
+
+ self.port = None # Port that the mongo shell should connect to.
+
+ def setup(self):
+ """
+ Creates the fixture.
+ """
+ pass
+
+ def await_ready(self):
+ """
+ Blocks until the fixture can be used for testing.
+ """
+ pass
+
+ def teardown(self):
+ """
+        Destroys the fixture. Returns true if it was successful, and false otherwise.
+ """
+ return True
+
+ def is_running(self):
+ """
+ Returns true if the fixture is still operating and more tests
+ can be run, and false otherwise.
+ """
+ return True
+
+ def get_connection_string(self):
+ """
+ Returns the connection string for this fixture. This is NOT a
+ driver connection string, but a connection string of the format
+ expected by the mongo::ConnectionString class.
+ """
+ raise NotImplementedError("get_connection_string must be implemented by Fixture subclasses")
+
+ def __str__(self):
+ return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
+
+ def __repr__(self):
+ return "%r(%r, %r)" % (self.__class__.__name__, self.logger, self.job_num)
+
+
+class ReplFixture(Fixture):
+ """
+ Base class for all fixtures that support replication.
+ """
+
+ AWAIT_REPL_TIMEOUT_MINS = 5
+
+ def get_primary(self):
+ """
+ Returns the primary of a replica set, or the master of a
+ master-slave deployment.
+ """
+ raise NotImplementedError("get_primary must be implemented by ReplFixture subclasses")
+
+ def get_secondaries(self):
+ """
+ Returns a list containing the secondaries of a replica set, or
+ the slave of a master-slave deployment.
+ """
+ raise NotImplementedError("get_secondaries must be implemented by ReplFixture subclasses")
+
+ def await_repl(self):
+ """
+ Blocks until all operations on the primary/master have
+ replicated to all other nodes.
+ """
+ raise NotImplementedError("await_repl must be implemented by ReplFixture subclasses")
+
+ def retry_until_wtimeout(self, insert_fn):
+ """
+ Given a callback function representing an insert operation on
+ the primary, handle any connection failures, and keep retrying
+ the operation for up to 'AWAIT_REPL_TIMEOUT_MINS' minutes.
+
+ The insert operation callback should take an argument for the
+ number of remaining seconds to provide as the timeout for the
+ operation.
+ """
+
+ deadline = time.time() + ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60
+
+ while True:
+ try:
+ remaining = deadline - time.time()
+ insert_fn(remaining)
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure("Failed to connect to the primary on port %d" %
+ self.port)
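+
+    # A minimal callback sketch (mirrors the real uses in replicaset.py and
+    # masterslave.py):
+    #
+    #   def insert_fn(remaining_secs):
+    #       wc = pymongo.WriteConcern(w=2, wtimeout=int(round(remaining_secs * 1000)))
+    #       coll = client.resmoke.get_collection("await_repl", write_concern=wc)
+    #       coll.insert_one({"awaiting": "repl"})
+    #
+    #   fixture.retry_until_wtimeout(insert_fn)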
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
new file mode 100644
index 00000000000..f3dbf87eb91
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -0,0 +1,209 @@
+"""
+Master/slave fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class MasterSlaveFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a master/slave deployment to
+ run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ master_options=None,
+ slave_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.master_options = utils.default_if_none(master_options, {})
+ self.slave_options = utils.default_if_none(slave_options, {})
+ self.preserve_dbpath = preserve_dbpath
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.master = None
+ self.slave = None
+
+ def setup(self):
+ if self.master is None:
+ self.master = self._new_mongod_master()
+ self.master.setup()
+ self.port = self.master.port
+
+ if self.slave is None:
+ self.slave = self._new_mongod_slave()
+ self.slave.setup()
+
+ def await_ready(self):
+ self.master.await_ready()
+ self.slave.await_ready()
+
+ # Do a replicated write to ensure that the slave has finished with its initial sync before
+ # starting to run any tests.
+ client = utils.new_mongo_client(self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_ready", write_concern=write_concern)
+ coll.insert_one({"awaiting": "ready"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Master-slave deployment was expected to be running in teardown(),"
+ " but wasn't.")
+
+ if self.slave is not None:
+ if running_at_start:
+ self.logger.info("Stopping slave...")
+
+ success = self.slave.teardown()
+
+ if running_at_start:
+ self.logger.info("Successfully stopped slave.")
+
+ if self.master is not None:
+ if running_at_start:
+ self.logger.info("Stopping master...")
+
+ success = self.master.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped master.")
+
+ return success
+
+ def is_running(self):
+ return (self.master is not None and self.master.is_running() and
+ self.slave is not None and self.slave.is_running())
+
+ def get_primary(self):
+ return self.master
+
+ def get_secondaries(self):
+ return [self.slave]
+
+ def await_repl(self):
+ """
+ Inserts a document into each database on the master and waits
+ for all write operations to be acknowledged by the master-slave
+ deployment.
+ """
+
+ client = utils.new_mongo_client(self.port)
+
+ # We verify that each database has replicated to the slave because in the case of an initial
+ # sync, the slave may acknowledge writes to one database before it has finished syncing
+ # others.
+ db_names = client.database_names()
+ self.logger.info("Awaiting replication of inserts to each of the following databases on"
+ " master on port %d: %s",
+ self.port,
+ db_names)
+
+ for db_name in db_names:
+ if db_name == "local":
+ continue # The local database is expected to differ, ignore.
+
+ self.logger.info("Awaiting replication of insert to database %s (w=2, wtimeout=%d min)"
+ " to master on port %d",
+ db_name,
+ interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client[db_name].get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed for database %s.", db_name)
+
+ self.logger.info("Finished awaiting replication.")
+
+ def _new_mongod(self, mongod_logger, mongod_options):
+ """
+ Returns a standalone.MongoDFixture with the specified logger and
+ options.
+ """
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongod_master(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ master of a master-slave deployment.
+ """
+
+ logger_name = "%s:master" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.master_options)
+ mongod_options["master"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "master")
+ return self._new_mongod(mongod_logger, mongod_options)
+
+ def _new_mongod_slave(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ slave of a master-slave deployment.
+ """
+
+ logger_name = "%s:slave" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.slave_options)
+ mongod_options["slave"] = ""
+ mongod_options["source"] = "localhost:%d" % (self.port)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
+ return self._new_mongod(mongod_logger, mongod_options)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
new file mode 100644
index 00000000000..e9930627641
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -0,0 +1,211 @@
+"""
+Replica set fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class ReplicaSetFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a replica set to run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_nodes=2,
+ auth_options=None,
+ replset_config_options=None):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_nodes = num_nodes
+ self.auth_options = auth_options
+ self.replset_config_options = utils.default_if_none(replset_config_options, {})
+
+ # The dbpath in mongod_options is used as the dbpath prefix for replica set members and
+ # takes precedence over other settings. The ShardedClusterFixture uses this parameter to
+ # create replica sets and assign their dbpath structure explicitly.
+ if "dbpath" in self.mongod_options:
+ self._dbpath_prefix = self.mongod_options.pop("dbpath")
+ else:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.nodes = []
+ self.replset_name = None
+
+ def setup(self):
+ self.replset_name = self.mongod_options.get("replSet", "rs")
+
+ if not self.nodes:
+ for i in xrange(self.num_nodes):
+ node = self._new_mongod(i, self.replset_name)
+ self.nodes.append(node)
+
+ for node in self.nodes:
+ node.setup()
+
+ self.port = self.get_primary().port
+
+ # Call await_ready() on each of the nodes here because we want to start the election as
+ # soon as possible.
+ for node in self.nodes:
+ node.await_ready()
+
+ # Initiate the replica set.
+ members = []
+ for (i, node) in enumerate(self.nodes):
+ member_info = {"_id": i, "host": node.get_connection_string()}
+ if i > 0:
+ member_info["priority"] = 0
+ if i >= 7:
+ # Only 7 nodes in a replica set can vote, so the other members must be non-voting.
+ member_info["votes"] = 0
+ members.append(member_info)
+ initiate_cmd_obj = {"replSetInitiate": {"_id": self.replset_name, "members": members}}
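+        # With the default two-node set this produces, e.g. (ports are
+        # illustrative):
+        #   {"replSetInitiate": {"_id": "rs", "members": [
+        #       {"_id": 0, "host": "localhost:20000"},
+        #       {"_id": 1, "host": "localhost:20001", "priority": 0}]}}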
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ if self.replset_config_options.get("configsvr", False):
+ initiate_cmd_obj["replSetInitiate"]["configsvr"] = True
+
+ self.logger.info("Issuing replSetInitiate command...")
+ client.admin.command(initiate_cmd_obj)
+
+ def await_ready(self):
+ # Wait for the primary to be elected.
+ client = utils.new_mongo_client(port=self.port)
+ while True:
+ is_master = client.admin.command("isMaster")["ismaster"]
+ if is_master:
+ break
+ self.logger.info("Waiting for primary on port %d to be elected.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ # Wait for the secondaries to become available.
+ for secondary in self.get_secondaries():
+ client = utils.new_mongo_client(port=secondary.port,
+ read_preference=pymongo.ReadPreference.SECONDARY)
+ while True:
+ is_secondary = client.admin.command("isMaster")["secondary"]
+ if is_secondary:
+ break
+ self.logger.info("Waiting for secondary on port %d to become available.",
+ secondary.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Replica set was expected to be running in teardown(), but wasn't.")
+ else:
+ self.logger.info("Stopping all members of the replica set...")
+
+ # Terminate the secondaries first to reduce noise in the logs.
+ for node in reversed(self.nodes):
+ success = node.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped all members of the replica set.")
+
+ return success
+
+ def is_running(self):
+ return all(node.is_running() for node in self.nodes)
+
+ def get_primary(self):
+ # The primary is always the first element of the 'nodes' list because all other members of
+ # the replica set are configured with priority=0.
+ return self.nodes[0]
+
+ def get_secondaries(self):
+ return self.nodes[1:]
+
+ def await_repl(self):
+ self.logger.info("Awaiting replication of insert (w=%d, wtimeout=%d min) to primary on port"
+ " %d", self.num_nodes, interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+ client = utils.new_mongo_client(port=self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=self.num_nodes, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed.")
+
+ def _new_mongod(self, index, replset_name):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ replica-set member of 'replset_name'.
+ """
+
+ mongod_logger = self._get_logger_for_mongod(index)
+ mongod_options = self.mongod_options.copy()
+ mongod_options["replSet"] = replset_name
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _get_logger_for_mongod(self, index):
+ """
+ Returns a new logging.Logger instance for use as the primary or
+ secondary of a replica-set.
+ """
+
+ if index == 0:
+ logger_name = "%s:primary" % (self.logger.name)
+ else:
+ suffix = str(index - 1) if self.num_nodes > 2 else ""
+ logger_name = "%s:secondary%s" % (self.logger.name, suffix)
+
+ return logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ def get_connection_string(self):
+ if self.replset_name is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ conn_strs = [node.get_connection_string() for node in self.nodes]
+ return self.replset_name + "/" + ",".join(conn_strs)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
new file mode 100644
index 00000000000..ab7b26bf372
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -0,0 +1,347 @@
+"""
+Sharded cluster fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from . import replicaset
+from ... import config
+from ... import core
+from ... import errors
+from ... import logging
+from ... import utils
+
+
+class ShardedClusterFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a sharded cluster to run
+ against.
+ """
+
+ _CONFIGSVR_REPLSET_NAME = "config-rs"
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_shards=1,
+ separate_configsvr=True,
+ enable_sharding=None,
+ auth_options=None):
+ """
+        Initializes ShardedClusterFixture with the various options for
+        the mongod and mongos processes.
+ """
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongos_executable = mongos_executable
+ self.mongos_options = utils.default_if_none(mongos_options, {})
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_shards = num_shards
+ self.separate_configsvr = separate_configsvr
+ self.enable_sharding = utils.default_if_none(enable_sharding, [])
+ self.auth_options = auth_options
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.configsvr = None
+ self.mongos = None
+ self.shards = []
+
+ def setup(self):
+ if self.separate_configsvr:
+ if self.configsvr is None:
+ self.configsvr = self._new_configsvr()
+ self.configsvr.setup()
+
+ if not self.shards:
+ for i in xrange(self.num_shards):
+ shard = self._new_shard(i)
+ self.shards.append(shard)
+
+ # Start up each of the shards
+ for shard in self.shards:
+ shard.setup()
+
+ def await_ready(self):
+ # Wait for the config server
+ if self.configsvr is not None:
+ self.configsvr.await_ready()
+
+ # Wait for each of the shards
+ for shard in self.shards:
+ shard.await_ready()
+
+ if self.mongos is None:
+ self.mongos = self._new_mongos()
+
+ # Start up the mongos
+ self.mongos.setup()
+
+ # Wait for the mongos
+ self.mongos.await_ready()
+ self.port = self.mongos.port
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ # Inform mongos about each of the shards
+ for shard in self.shards:
+ self._add_shard(client, shard)
+
+ # Enable sharding on each of the specified databases
+ for db_name in self.enable_sharding:
+ self.logger.info("Enabling sharding for '%s' database...", db_name)
+ client.admin.command({"enablesharding": db_name})
+
+ def teardown(self):
+ """
+ Shuts down the sharded cluster.
+ """
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Sharded cluster was expected to be running in teardown(), but"
+ " wasn't.")
+
+ if self.configsvr is not None:
+ if running_at_start:
+ self.logger.info("Stopping config server...")
+
+ success = self.configsvr.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the config server.")
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos...")
+
+ success = self.mongos.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos.")
+
+ if running_at_start:
+ self.logger.info("Stopping shards...")
+ for shard in self.shards:
+ success = shard.teardown() and success
+ if running_at_start:
+ self.logger.info("Successfully terminated all shards.")
+
+ return success
+
+ def is_running(self):
+ """
+ Returns true if the config server, all shards, and the mongos
+ are all still operating, and false otherwise.
+ """
+ return (self.configsvr is not None and self.configsvr.is_running() and
+ all(shard.is_running() for shard in self.shards) and
+ self.mongos is not None and self.mongos.is_running())
+
+ def _new_configsvr(self):
+ """
+ Returns a replicaset.ReplicaSetFixture configured to be used as
+ the config server of a sharded cluster.
+ """
+
+ logger_name = "%s:configsvr" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["configsvr"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
+ mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ mongod_options["storageEngine"] = "wiredTiger"
+
+ return replicaset.ReplicaSetFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath,
+ num_nodes=3,
+ auth_options=self.auth_options,
+ replset_config_options={"configsvr": True})
+
+ def _new_shard(self, index):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ shard in a sharded cluster.
+ """
+
+ logger_name = "%s:shard%d" % (self.logger.name, index)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongos(self):
+ """
+ Returns a _MongoSFixture configured to be used as the mongos for
+ a sharded cluster.
+ """
+
+ logger_name = "%s:mongos" % (self.logger.name)
+ mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongos_options = copy.deepcopy(self.mongos_options)
+ if self.separate_configsvr:
+ configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ configdb_port = self.configsvr.port
+ mongos_options["configdb"] = "%s/localhost:%d" % (configdb_replset, configdb_port)
+ else:
+ mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)
+
+ return _MongoSFixture(mongos_logger,
+ self.job_num,
+ mongos_executable=self.mongos_executable,
+ mongos_options=mongos_options)
+
+ def _add_shard(self, client, shard):
+ """
+ Add the specified program as a shard by executing the addShard
+ command.
+
+ See https://docs.mongodb.org/manual/reference/command/addShard
+ for more details.
+ """
+
+ self.logger.info("Adding localhost:%d as a shard...", shard.port)
+ client.admin.command({"addShard": "localhost:%d" % (shard.port)})
+
+
+class _MongoSFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a mongos to connect to.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ # Command line options override the YAML configuration.
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)
+
+ self.mongos_options = utils.default_if_none(mongos_options, {}).copy()
+
+ self.mongos = None
+
+ def setup(self):
+ if "chunkSize" not in self.mongos_options:
+ self.mongos_options["chunkSize"] = 50
+
+ if "port" not in self.mongos_options:
+ self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongos_options["port"]
+
+ mongos = core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.mongos_options)
+ try:
+ self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
+ mongos.start()
+ self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
+ except:
+ self.logger.exception("Failed to start mongos on port %d.", self.port)
+ raise
+
+ self.mongos = mongos
+
+ def await_ready(self):
+ deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongos is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongos exited for some reason.
+ exit_code = self.mongos.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongos on port %d after %d seconds"
+ % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongos on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongos on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongos on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos on port %d with pid %d...",
+ self.port,
+ self.mongos.pid)
+ self.mongos.stop()
+
+ exit_code = self.mongos.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos on port %d, exited with code"
+                                 " %d.",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongos is not None and self.mongos.poll() is None
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
new file mode 100644
index 00000000000..a8c1dc597c5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -0,0 +1,151 @@
+"""
+Standalone mongod fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import time
+
+import pymongo
+
+from . import interface
+from ... import config
+from ... import core
+from ... import errors
+from ... import utils
+
+
+class MongoDFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a standalone mongod to run
+ against.
+ """
+
+ AWAIT_READY_TIMEOUT_SECS = 300
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options and dbpath_prefix is not None:
+ raise ValueError("Cannot specify both mongod_options.dbpath and dbpath_prefix")
+
+ # Command line options override the YAML configuration.
+ self.mongod_executable = utils.default_if_none(config.MONGOD_EXECUTABLE, mongod_executable)
+
+ self.mongod_options = utils.default_if_none(mongod_options, {}).copy()
+ self.preserve_dbpath = preserve_dbpath
+
+ # The dbpath in mongod_options takes precedence over other settings to make it easier for
+ # users to specify a dbpath containing data to test against.
+ if "dbpath" not in self.mongod_options:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self.mongod_options["dbpath"] = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+ self._dbpath = self.mongod_options["dbpath"]
+
+ self.mongod = None
+
+ def setup(self):
+ if not self.preserve_dbpath:
+ shutil.rmtree(self._dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(self._dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ if "port" not in self.mongod_options:
+ self.mongod_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongod_options["port"]
+
+ mongod = core.programs.mongod_program(self.logger,
+ executable=self.mongod_executable,
+ **self.mongod_options)
+ try:
+ self.logger.info("Starting mongod on port %d...\n%s", self.port, mongod.as_command())
+ mongod.start()
+ self.logger.info("mongod started on port %d with pid %d.", self.port, mongod.pid)
+ except:
+ self.logger.exception("Failed to start mongod on port %d.", self.port)
+ raise
+
+ self.mongod = mongod
+
+ def await_ready(self):
+ deadline = time.time() + MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongod is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongod exited for some reason.
+ exit_code = self.mongod.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongod on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongod on port %d after %d seconds"
+ % (self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongod on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongod on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongod on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongod is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongod on port %d with pid %d...",
+ self.port,
+ self.mongod.pid)
+ self.mongod.stop()
+
+ exit_code = self.mongod.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongod on port %d, exited with code"
+ " %d.",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongod is not None and self.mongod.poll() is None
+
+ def get_connection_string(self):
+ if self.mongod is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ return "localhost:%d" % self.port
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
new file mode 100644
index 00000000000..4c580fa8392
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
@@ -0,0 +1,704 @@
+"""
+Customize the behavior of a fixture by allowing special code to be
+executed before or after each test, and before or after each suite.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+import bson
+import pymongo
+
+from . import fixtures
+from . import testcases
+from .. import errors
+from .. import logging
+from .. import utils
+
+
+def make_custom_behavior(class_name, *args, **kwargs):
+ """
+ Factory function for creating CustomBehavior instances.
+ """
+
+ if class_name not in _CUSTOM_BEHAVIORS:
+ raise ValueError("Unknown custom behavior class '%s'" % (class_name))
+ return _CUSTOM_BEHAVIORS[class_name](*args, **kwargs)
+
+
+class CustomBehavior(object):
+ """
+ The common interface all CustomBehaviors will inherit from.
+ """
+
+ @staticmethod
+ def start_dynamic_test(test_case, test_report):
+ """
+ If a CustomBehavior wants to add a test case that will show up
+ in the test report, it should use this method to add it to the
+ report, since we will need to count it as a dynamic test to get
+ the stats in the summary information right.
+ """
+ test_report.startTest(test_case, dynamic=True)
+
+ def __init__(self, logger, fixture):
+ """
+ Initializes the CustomBehavior with the specified fixture.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ self.logger = logger
+ self.fixture = fixture
+
+ def before_suite(self, test_report):
+ """
+        The test runner calls this exactly once before it starts
+        running the suite.
+ """
+ pass
+
+ def after_suite(self, test_report):
+ """
+ The test runner calls this exactly once after all tests have
+ finished executing. Be sure to reset the behavior back to its
+ original state so that it can be run again.
+ """
+ pass
+
+ def before_test(self, test_report):
+ """
+ Each test will call this before it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+ def after_test(self, test_report):
+ """
+ Each test will call this after it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+
+class CleanEveryN(CustomBehavior):
+ """
+    Restarts the fixture after it has run 'n' tests.
+ On mongod-related fixtures, this will clear the dbpath.
+ """
+
+ DEFAULT_N = 20
+
+ def __init__(self, logger, fixture, n=DEFAULT_N):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ # Try to isolate what test triggers the leak by restarting the fixture each time.
+ if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
+ self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
+ " the fixture after each test instead of after every %d.", n)
+ n = 1
+
+ self.n = n
+ self.tests_run = 0
+
+ def after_test(self, test_report):
+ self.tests_run += 1
+ if self.tests_run >= self.n:
+ self.logger.info("%d tests have been run against the fixture, stopping it...",
+ self.tests_run)
+ self.tests_run = 0
+
+ teardown_success = self.fixture.teardown()
+ self.logger.info("Starting the fixture back up again...")
+ self.fixture.setup()
+ self.fixture.await_ready()
+
+ # Raise this after calling setup in case --continueOnFailure was specified.
+ if not teardown_success:
+ raise errors.TestFailure("%s did not exit cleanly" % (self.fixture))
+
+
+class CheckReplDBHash(CustomBehavior):
+ """
+    Waits for replication after each test, then checks that the dbhashes
+    of all databases other than "local" match on the primary and all of
+    the secondaries. If any dbhashes do not match, logs information
+    about what was different (e.g. different numbers of collections,
+    missing documents in a collection, mismatching documents, etc.).
+
+ Compatible only with ReplFixture subclasses.
+ """
+
+ def __init__(self, logger, fixture):
+ if not isinstance(fixture, fixtures.ReplFixture):
+ raise TypeError("%s does not support replication" % (fixture.__class__.__name__))
+
+ CustomBehavior.__init__(self, logger, fixture)
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")
+
+ self.started = False
+
+ def after_test(self, test_report):
+ """
+ After each test, check that the dbhash of the test database is
+ the same on all nodes in the replica set or master/slave
+ fixture.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ # Wait until all operations have replicated.
+ self.fixture.await_repl()
+
+ success = True
+ sb = [] # String builder.
+
+ primary = self.fixture.get_primary()
+ primary_conn = utils.new_mongo_client(port=primary.port)
+
+ for secondary in self.fixture.get_secondaries():
+ read_preference = pymongo.ReadPreference.SECONDARY
+ secondary_conn = utils.new_mongo_client(port=secondary.port,
+ read_preference=read_preference)
+ # Skip arbiters.
+ if secondary_conn.admin.command("isMaster").get("arbiterOnly", False):
+ continue
+
+ all_matched = CheckReplDBHash._check_all_db_hashes(primary_conn,
+ secondary_conn,
+ sb)
+ if not all_matched:
+ sb.insert(0,
+ "One or more databases were different between the primary on port %d"
+ " and the secondary on port %d:"
+ % (primary.port, secondary.port))
+
+ success = all_matched and success
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("The dbhashes did not match")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("The dbhashes did not match.")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+ except pymongo.errors.WTimeoutError:
+ self.test_case.logger.exception("Awaiting replication timed out.")
+ self.test_case.return_code = 2
+ test_report.addError(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.StopExecution("Awaiting replication timed out")
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #dbhash# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("The dbhashes matched for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_db_hashes(primary_conn, secondary_conn, sb):
+ """
+ Returns true if for each non-local database, the dbhash command
+ returns the same MD5 hash on the primary as it does on the
+ secondary. Returns false otherwise.
+
+ Logs a message describing the differences if any database's
+ dbhash did not match.
+ """
+
+ # Overview of how we'll check that everything replicated correctly between these two nodes:
+ #
+ # - Check whether they have the same databases.
+ # - If not, log which databases are missing where, and dump the contents of any that are
+ # missing.
+ #
+ # - Check whether each database besides "local" gives the same md5 field as the result of
+ # running the dbhash command.
+ # - If not, check whether they have the same collections.
+ # - If not, log which collections are missing where, and dump the contents of any
+ # that are missing.
+ # - If so, check that the hash of each non-capped collection matches.
+ # - If any do not match, log the diff of the collection between the two nodes.
+
+ success = True
+
+ if not CheckReplDBHash._check_dbs_present(primary_conn, secondary_conn, sb):
+ return False
+
+ for db_name in primary_conn.database_names():
+ if db_name == "local":
+ continue # We don't expect this to match across different nodes.
+
+ matched = CheckReplDBHash._check_db_hash(primary_conn, secondary_conn, db_name, sb)
+ success = matched and success
+
+ return success
+
+ @staticmethod
+ def _check_dbs_present(primary_conn, secondary_conn, sb):
+ """
+ Returns true if the list of databases on the primary is
+ identical to the list of databases on the secondary, and false
+ otherwise.
+ """
+
+ success = True
+ primary_dbs = primary_conn.database_names()
+
+ # Can't run database_names() on secondary, so instead use the listDatabases command.
+ # TODO: Use database_names() once PYTHON-921 is resolved.
+ list_db_output = secondary_conn.admin.command("listDatabases")
+ secondary_dbs = [db["name"] for db in list_db_output["databases"]]
+
+        # A difference in databases is not considered an error when the database only
+        # contains system collections. Such a difference is only reported when other,
+        # real differences are encountered, i.e. when success is False.
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ set(primary_dbs), set(secondary_dbs), "database")
+
+ for missing_db in missing_on_secondary:
+ db = primary_conn[missing_db]
+ coll_names = db.collection_names()
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+ # otherwise it's not well defined whether they should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on primary but not on secondary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ for missing_db in missing_on_primary:
+ db = secondary_conn[missing_db]
+
+ # Can't run collection_names() on secondary, so instead use the listCollections command.
+ # TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
+ # logic that is duplicated here can be consolidated.
+ list_coll_output = db.command("listCollections")["cursor"]["firstBatch"]
+ coll_names = [coll["name"] for coll in list_coll_output]
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+            # otherwise it's not well defined whether it should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on secondary but not on primary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ return success
+
+ @staticmethod
+ def _check_db_hash(primary_conn, secondary_conn, db_name, sb):
+ """
+ Returns true if the dbhash for 'db_name' matches on the primary
+ and the secondary, and false otherwise.
+
+ Appends a message to 'sb' describing the differences if the
+ dbhashes do not match.
+ """
+
+ primary_hash = primary_conn[db_name].command("dbhash")
+ secondary_hash = secondary_conn[db_name].command("dbhash")
+
+ if primary_hash["md5"] == secondary_hash["md5"]:
+ return True
+
+ success = CheckReplDBHash._check_dbs_eq(
+ primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb)
+
+ if not success:
+ sb.append("Database %s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, primary_hash["md5"], secondary_hash["md5"]))
+
+ return success
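+
+    # The dbhash response is roughly of the form
+    #   {"md5": "<whole-db hash>", "collections": {"<coll name>": "<hash>", ...}}
+    # which is what _check_dbs_eq() walks through below.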
+
+ @staticmethod
+ def _check_dbs_eq(primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb):
+ """
+ Returns true if all non-capped collections had the same hash in
+ the dbhash response, and false otherwise.
+
+ Appends information to 'sb' about the differences between the
+ 'db_name' database on the primary and the 'db_name' database on
+ the secondary, if any.
+ """
+
+ success = True
+
+ primary_db = primary_conn[db_name]
+ secondary_db = secondary_conn[db_name]
+
+ primary_coll_hashes = primary_hash["collections"]
+ secondary_coll_hashes = secondary_hash["collections"]
+
+ primary_coll_names = set(primary_coll_hashes.keys())
+ secondary_coll_names = set(secondary_coll_hashes.keys())
+
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ primary_coll_names, secondary_coll_names, "collection", sb=sb)
+
+ if missing_on_primary or missing_on_secondary:
+
+ # 'sb' already describes which collections are missing where.
+ for coll_name in missing_on_primary:
+ CheckReplDBHash._dump_all_documents(secondary_db, coll_name, sb)
+ for coll_name in missing_on_secondary:
+ CheckReplDBHash._dump_all_documents(primary_db, coll_name, sb)
+            return False
+
+ for coll_name in primary_coll_names & secondary_coll_names:
+ primary_coll_hash = primary_coll_hashes[coll_name]
+ secondary_coll_hash = secondary_coll_hashes[coll_name]
+
+ if primary_coll_hash == secondary_coll_hash:
+ continue
+
+ # Ignore capped collections because they are not expected to match on all nodes.
+ if primary_db.command({"collStats": coll_name})["capped"]:
+ # Still fail if the collection is not capped on the secondary.
+ if not secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on primary but not on secondary."
+ % (primary_db.name, coll_name))
+ sb.append("%s.%s collection is capped, ignoring." % (primary_db.name, coll_name))
+ continue
+ # Still fail if the collection is capped on the secondary, but not on the primary.
+ elif secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on secondary but not on primary."
+ % (primary_db.name, coll_name))
+ continue
+
+ success = False
+ sb.append("Collection %s.%s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, coll_name, primary_coll_hash, secondary_coll_hash))
+ CheckReplDBHash._check_colls_eq(primary_db, secondary_db, coll_name, sb)
+
+ if success:
+ sb.append("All collections that were expected to match did.")
+ return success
+
+ @staticmethod
+ def _check_colls_eq(primary_db, secondary_db, coll_name, sb):
+ """
+        Appends information to 'sb' about the differences, if any,
+        between the 'coll_name' collection on the primary and the
+        'coll_name' collection on the secondary.
+ """
+
+ codec_options = bson.CodecOptions(document_class=TypeSensitiveSON)
+
+ primary_coll = primary_db.get_collection(coll_name, codec_options=codec_options)
+ secondary_coll = secondary_db.get_collection(coll_name, codec_options=codec_options)
+
+ primary_docs = CheckReplDBHash._extract_documents(primary_coll)
+ secondary_docs = CheckReplDBHash._extract_documents(secondary_coll)
+
+ CheckReplDBHash._get_collection_diff(primary_docs, secondary_docs, sb)
+
+ @staticmethod
+ def _extract_documents(collection):
+ """
+ Returns a list of all documents in the collection, sorted by
+ their _id.
+ """
+
+ return [doc for doc in collection.find().sort("_id", pymongo.ASCENDING)]
+
+ @staticmethod
+ def _get_collection_diff(primary_docs, secondary_docs, sb):
+ """
+ Returns true if the documents in 'primary_docs' exactly match
+ the documents in 'secondary_docs', and false otherwise.
+
+ Appends information to 'sb' about what matched or did not match.
+ """
+
+ matched = True
+
+ # These need to be lists instead of sets because documents aren't hashable.
+ missing_on_primary = []
+ missing_on_secondary = []
+
+ p_idx = 0 # Keep track of our position in 'primary_docs'.
+ s_idx = 0 # Keep track of our position in 'secondary_docs'.
+
+ while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
+ primary_doc = primary_docs[p_idx]
+ secondary_doc = secondary_docs[s_idx]
+
+ if primary_doc == secondary_doc:
+ p_idx += 1
+ s_idx += 1
+ continue
+
+ # We have mismatching documents.
+ matched = False
+
+ if primary_doc["_id"] == secondary_doc["_id"]:
+ sb.append("Mismatching document:")
+ sb.append(" primary: %s" % (primary_doc))
+ sb.append(" secondary: %s" % (secondary_doc))
+ p_idx += 1
+ s_idx += 1
+
+ # One node was missing a document. Since the documents are sorted by _id, the doc with
+ # the smaller _id was the one that was skipped.
+ elif primary_doc["_id"] < secondary_doc["_id"]:
+ missing_on_secondary.append(primary_doc)
+
+ # Only move past the doc that we know was skipped.
+ p_idx += 1
+
+ else: # primary_doc["_id"] > secondary_doc["_id"]
+ missing_on_primary.append(secondary_doc)
+
+ # Only move past the doc that we know was skipped.
+ s_idx += 1
+
+ # Check if there are any unmatched documents left.
+ while p_idx < len(primary_docs):
+ matched = False
+ missing_on_secondary.append(primary_docs[p_idx])
+ p_idx += 1
+ while s_idx < len(secondary_docs):
+ matched = False
+ missing_on_primary.append(secondary_docs[s_idx])
+ s_idx += 1
+
+ if not matched:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, "document", sb)
+ else:
+ sb.append("All documents matched.")
+
+ @staticmethod
+ def _check_difference(primary_set, secondary_set, item_type_name, sb=None):
+ """
+        Returns a pair of sets containing the items that were missing
+        on the primary and the items that were missing on the
+        secondary, respectively. The input sets contain information
+        about the primary and secondary, e.g. the database names that
+        exist on each node.
+
+        If 'sb' is specified, appends information about anything that
+        differed to it.
+ """
+
+ missing_on_primary = set()
+ missing_on_secondary = set()
+
+ for item in primary_set - secondary_set:
+ missing_on_secondary.add(item)
+
+ for item in secondary_set - primary_set:
+ missing_on_primary.add(item)
+
+ if sb is not None:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, item_type_name, sb)
+
+ return (missing_on_primary, missing_on_secondary)
+
+ @staticmethod
+ def _append_differences(missing_on_primary, missing_on_secondary, item_type_name, sb):
+ """
+        Given two iterables representing items that were missing on the
+        primary or the secondary respectively, appends information
+        about which items were missing, if any, to 'sb'.
+ """
+
+ if missing_on_primary:
+ sb.append("The following %ss were present on the secondary, but not on the"
+ " primary:" % (item_type_name))
+ for item in missing_on_primary:
+ sb.append(str(item))
+
+ if missing_on_secondary:
+ sb.append("The following %ss were present on the primary, but not on the"
+ " secondary:" % (item_type_name))
+ for item in missing_on_secondary:
+ sb.append(str(item))
+
+ @staticmethod
+ def _dump_all_collections(database, coll_names, sb):
+ """
+ Appends the contents of each of the collections in 'coll_names'
+ to 'sb'.
+ """
+
+ if coll_names:
+ sb.append("Database %s contains the following collections: %s"
+ % (database.name, coll_names))
+ for coll_name in coll_names:
+ CheckReplDBHash._dump_all_documents(database, coll_name, sb)
+ else:
+ sb.append("No collections in database %s." % (database.name))
+
+ @staticmethod
+ def _dump_all_documents(database, coll_name, sb):
+ """
+ Appends the contents of 'coll_name' to 'sb'.
+ """
+
+ docs = CheckReplDBHash._extract_documents(database[coll_name])
+ if docs:
+ sb.append("Documents in %s.%s:" % (database.name, coll_name))
+ for doc in docs:
+ sb.append(" %s" % (doc))
+ else:
+ sb.append("No documents in %s.%s." % (database.name, coll_name))
+
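+# Illustrative sketch (not used by the hook): the two-pointer walk that
+# CheckReplDBHash._get_collection_diff() performs above, assuming both
+# input lists are sorted by their _id.
+def _sketch_diff_sorted_docs(primary_docs, secondary_docs):
+    missing_on_primary, missing_on_secondary, mismatched = [], [], []
+    p_idx, s_idx = 0, 0
+    while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
+        primary_doc, secondary_doc = primary_docs[p_idx], secondary_docs[s_idx]
+        if primary_doc == secondary_doc:
+            p_idx += 1
+            s_idx += 1
+        elif primary_doc["_id"] == secondary_doc["_id"]:
+            # Same _id on both nodes, but different contents.
+            mismatched.append((primary_doc, secondary_doc))
+            p_idx += 1
+            s_idx += 1
+        elif primary_doc["_id"] < secondary_doc["_id"]:
+            # The secondary skipped the document with the smaller _id.
+            missing_on_secondary.append(primary_doc)
+            p_idx += 1
+        else:
+            # The primary skipped the document with the smaller _id.
+            missing_on_primary.append(secondary_doc)
+            s_idx += 1
+    # Any leftover documents are missing from the other node.
+    missing_on_secondary.extend(primary_docs[p_idx:])
+    missing_on_primary.extend(secondary_docs[s_idx:])
+    return missing_on_primary, missing_on_secondary, mismatched
+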
+class TypeSensitiveSON(bson.SON):
+ """
+ Extends bson.SON to perform additional type-checking of document values
+ to differentiate BSON types.
+ """
+
+ def items_with_types(self):
+ """
+ Returns a list of triples. Each triple consists of a field name, a
+ field value, and a field type for each field in the document.
+ """
+
+ return [(key, self[key], type(self[key])) for key in self]
+
+ def __eq__(self, other):
+ """
+ Comparison to another TypeSensitiveSON is order-sensitive and
+ type-sensitive while comparison to a regular dictionary ignores order
+ and type mismatches.
+ """
+
+ if isinstance(other, TypeSensitiveSON):
+ return (len(self) == len(other) and
+ self.items_with_types() == other.items_with_types())
+
+ raise TypeError("TypeSensitiveSON objects cannot be compared to other types")
+
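+# Illustrative sketch (not used by the hooks): why the type-sensitive
+# comparison matters. A plain dict comparison treats the int 1 and the
+# float 1.0 as equal, while TypeSensitiveSON distinguishes them because
+# the underlying BSON types differ.
+def _sketch_type_sensitive_compare():
+    doc_a = TypeSensitiveSON([("count", 1)])
+    doc_b = TypeSensitiveSON([("count", 1.0)])
+    assert dict(doc_a) == dict(doc_b)  # Type-insensitive comparison.
+    assert not (doc_a == doc_b)  # items_with_types() sees int vs. float.
+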
+class ValidateCollections(CustomBehavior):
+ """
+ Runs full validation (db.collection.validate(true)) on all collections
+    in all databases on every standalone or primary mongod. If validation
+    fails (validate.valid is false), then the validate return object is
+    logged.
+
+    Compatible with all fixture subclasses.
+ """
+ DEFAULT_FULL = True
+ DEFAULT_SCANDATA = True
+
+ def __init__(self, logger, fixture, full=DEFAULT_FULL, scandata=DEFAULT_SCANDATA):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ if not isinstance(full, bool):
+ raise TypeError("Fixture option full is not specified as type bool")
+
+ if not isinstance(scandata, bool):
+ raise TypeError("Fixture option scandata is not specified as type bool")
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#validate#")
+ self.started = False
+ self.full = full
+ self.scandata = scandata
+
+ def after_test(self, test_report):
+ """
+ After each test, run a full validation on all collections.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ sb = [] # String builder.
+
+ # The self.fixture.port can be used for client connection to a
+ # standalone mongod, a replica-set primary, or mongos.
+ # TODO: Run collection validation on all nodes in a replica-set.
+ port = self.fixture.port
+ conn = utils.new_mongo_client(port=port)
+
+ success = ValidateCollections._check_all_collections(
+ conn, sb, self.full, self.scandata)
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("Collection validation failed")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("Collection validation failed")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #validate# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("Collection validation passed for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_collections(conn, sb, full, scandata):
+ """
+        Returns true if validate_collection succeeds for every
+        collection in every database, and false otherwise.
+
+        Appends a message to 'sb' for any collection that fails
+        validate_collection.
+ """
+
+ success = True
+
+ for db_name in conn.database_names():
+ for coll_name in conn[db_name].collection_names():
+ try:
+ conn[db_name].validate_collection(coll_name, full=full, scandata=scandata)
+ except pymongo.errors.CollectionInvalid as err:
+ sb.append("Database %s, collection %s failed to validate:\n%s"
+ % (db_name, coll_name, err.args[0]))
+ success = False
+ return success
+
+
+_CUSTOM_BEHAVIORS = {
+ "CleanEveryN": CleanEveryN,
+ "CheckReplDBHash": CheckReplDBHash,
+ "ValidateCollections": ValidateCollections,
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/job.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/job.py
new file mode 100644
index 00000000000..bc5705ffdfb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/job.py
@@ -0,0 +1,195 @@
+"""
+Enables support for running tests simultaneously by processing them
+from a multi-consumer queue.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from .. import config
+from .. import errors
+from ..utils import queue as _queue
+
+
+class Job(object):
+ """
+ Runs tests from a queue.
+ """
+
+ def __init__(self, logger, fixture, hooks, report):
+ """
+ Initializes the job with the specified fixture and custom
+ behaviors.
+ """
+
+ self.logger = logger
+ self.fixture = fixture
+ self.hooks = hooks
+ self.report = report
+
+ def __call__(self, queue, interrupt_flag):
+ """
+ Continuously executes tests from 'queue' and records their
+ details in 'report'.
+ """
+
+ should_stop = False
+ try:
+ self._run(queue, interrupt_flag)
+ except errors.StopExecution as err:
+ # Stop running tests immediately.
+ self.logger.error("Received a StopExecution exception: %s.", err)
+ should_stop = True
+ except:
+ # Unknown error, stop execution.
+ self.logger.exception("Encountered an error during test execution.")
+ should_stop = True
+
+ if should_stop:
+ # Set the interrupt flag so that other jobs do not start running more tests.
+ interrupt_flag.set()
+ # Drain the queue to unblock the main thread.
+ Job._drain_queue(queue)
+
+ def _run(self, queue, interrupt_flag):
+ """
+ Calls the before/after suite hooks and continuously executes
+ tests from 'queue'.
+ """
+
+ for hook in self.hooks:
+ hook.before_suite(self.report)
+
+ while not interrupt_flag.is_set():
+ test = queue.get_nowait()
+ try:
+ if test is None:
+ # Sentinel value received, so exit.
+ break
+ self._execute_test(test)
+ finally:
+ queue.task_done()
+
+ for hook in self.hooks:
+ hook.after_suite(self.report)
+
+ def _execute_test(self, test):
+ """
+ Calls the before/after test hooks and executes 'test'.
+ """
+
+ test.configure(self.fixture)
+ self._run_hooks_before_tests(test)
+
+ test(self.report)
+ if config.FAIL_FAST and not self.report.wasSuccessful():
+ test.logger.info("%s failed, so stopping..." % (test.shortDescription()))
+ raise errors.StopExecution("%s failed" % (test.shortDescription()))
+
+ if not self.fixture.is_running():
+ self.logger.error("%s marked as a failure because the fixture crashed during the test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ # Always fail fast if the fixture fails.
+ raise errors.StopExecution("%s not running after %s" %
+ (self.fixture, test.shortDescription()))
+
+ self._run_hooks_after_tests(test)
+
+ def _run_hooks_before_tests(self, test):
+ """
+ Runs the before_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+
+ try:
+ for hook in self.hooks:
+ hook.before_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=2)
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except:
+ # Record the before_test() error in 'self.report'.
+ self.report.startTest(test)
+ self.report.addError(test, sys.exc_info())
+ self.report.stopTest(test)
+ raise
+
+ def _run_hooks_after_tests(self, test):
+ """
+ Runs the after_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+ try:
+ for hook in self.hooks:
+ hook.after_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except:
+ self.report.setError(test)
+ raise
+
+ def _fail_test(self, test, exc_info, return_code=1):
+ """
+ Helper to record a test as a failure with the provided return
+ code.
+
+ This method should not be used if 'test' has already been
+ started, instead use TestReport.setFailure().
+ """
+
+ self.report.startTest(test)
+ test.return_code = return_code
+ self.report.addFailure(test, exc_info)
+ self.report.stopTest(test)
+
+ @staticmethod
+ def _drain_queue(queue):
+ """
+ Removes all elements from 'queue' without actually doing
+ anything to them. Necessary to unblock the main thread that is
+ waiting for 'queue' to be empty.
+ """
+
+ try:
+ while not queue.empty():
+ queue.get_nowait()
+ queue.task_done()
+ except _queue.Empty:
+ # Multiple threads may be draining the queue simultaneously, so just ignore the
+ # exception from the race between queue.empty() being false and failing to get an item.
+ pass
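+
+
+# Illustrative sketch (names are hypothetical, not part of resmoke): how
+# Job instances are typically driven. The queue is filled up front with
+# every test plus one None sentinel per job, so Job.__call__ can use
+# get_nowait() without racing against an empty queue.
+def _sketch_run_jobs(jobs, tests):
+    import threading
+
+    queue = _queue.Queue()
+    for test in tests:
+        queue.put(test)
+    for _ in jobs:
+        queue.put(None)  # One sentinel per job unblocks every consumer.
+
+    interrupt_flag = threading.Event()
+    threads = []
+    for job in jobs:
+        thread = threading.Thread(target=job, args=(queue, interrupt_flag))
+        thread.daemon = True
+        thread.start()
+        threads.append(thread)
+
+    queue.join()  # Wait until every test has been consumed.
+    for thread in threads:
+        thread.join()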
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/report.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/report.py
new file mode 100644
index 00000000000..61468e1dd41
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/report.py
@@ -0,0 +1,330 @@
+"""
+Extension to the unittest.TestResult to support additional test status
+and timing information for the report.json file.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import time
+import unittest
+
+from .. import config
+from .. import logging
+
+
+class TestReport(unittest.TestResult):
+ """
+ Records test status and timing information.
+ """
+
+ def __init__(self, logger, logging_config, build_id=None, build_config=None):
+ """
+ Initializes the TestReport with the buildlogger configuration.
+ """
+
+ unittest.TestResult.__init__(self)
+
+ self.logger = logger
+ self.logging_config = logging_config
+ self.build_id = build_id
+ self.build_config = build_config
+
+ self.reset()
+
+ @classmethod
+ def combine(cls, *reports):
+ """
+ Merges the results from multiple TestReport instances into one.
+
+        If the same test is present in multiple reports, then one that
+        failed or errored is preferred over one that succeeded. This
+        behavior is useful when running multiple jobs that dynamically
+        add a #dbhash# test case.
+ """
+
+ combined_report = cls(logging.loggers.EXECUTOR, {})
+ combining_time = time.time()
+
+ for report in reports:
+ if not isinstance(report, TestReport):
+ raise TypeError("reports must be a list of TestReport instances")
+
+ for test_info in report.test_infos:
+ # If the user triggers a KeyboardInterrupt exception while a test is running, then
+ # it is possible for 'test_info' to be modified by a job thread later on. We make a
+ # shallow copy in order to ensure 'num_failed' is consistent with the actual number
+ # of tests that have status equal to "failed".
+ test_info = copy.copy(test_info)
+
+ # TestReport.addXX() may not have been called.
+ if test_info.status is None or test_info.return_code is None:
+ # Mark the test as having failed if it was interrupted. It might have passed if
+ # the suite ran to completion, but we wouldn't know for sure.
+ test_info.status = "fail"
+ test_info.return_code = -2
+
+ # TestReport.stopTest() may not have been called.
+ if test_info.end_time is None:
+ # Use the current time as the time that the test finished running.
+ test_info.end_time = combining_time
+
+ combined_report.test_infos.append(test_info)
+
+ combined_report.num_dynamic += report.num_dynamic
+
+ # Recompute number of success, failures, and errors.
+ combined_report.num_succeeded = len(combined_report.get_successful())
+ combined_report.num_failed = len(combined_report.get_failed())
+ combined_report.num_errored = len(combined_report.get_errored())
+
+ return combined_report
+
+ def startTest(self, test, dynamic=False):
+ """
+ Called immediately before 'test' is run.
+ """
+
+ unittest.TestResult.startTest(self, test)
+
+ test_info = _TestInfo(test.id(), dynamic)
+ test_info.start_time = time.time()
+ self.test_infos.append(test_info)
+
+ basename = test.basename()
+ if dynamic:
+ command = "(dynamic test case)"
+ self.num_dynamic += 1
+ else:
+ command = test.as_command()
+ self.logger.info("Running %s...\n%s", basename, command)
+
+ test_id = logging.buildlogger.new_test_id(self.build_id,
+ self.build_config,
+ basename,
+ command)
+
+ if self.build_id is not None:
+ endpoint = logging.buildlogger.APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": test_id,
+ }
+
+ test_info.url_endpoint = "%s/%s/" % (config.BUILDLOGGER_URL.rstrip("/"),
+ endpoint.strip("/"))
+
+ self.logger.info("Writing output of %s to %s.",
+ test.shortDescription(),
+ test_info.url_endpoint)
+
+ # Set up the test-specific logger.
+ logger_name = "%s:%s" % (test.logger.name, test.short_name())
+ logger = logging.loggers.new_logger(logger_name, parent=test.logger)
+ logging.config.apply_buildlogger_test_handler(logger,
+ self.logging_config,
+ build_id=self.build_id,
+ build_config=self.build_config,
+ test_id=test_id)
+
+ self.__original_loggers[test_info.test_id] = test.logger
+ test.logger = logger
+
+ def stopTest(self, test):
+ """
+ Called immediately after 'test' has run.
+ """
+
+ unittest.TestResult.stopTest(self, test)
+
+ test_info = self._find_test_info(test)
+ test_info.end_time = time.time()
+
+ time_taken = test_info.end_time - test_info.start_time
+ self.logger.info("%s ran in %0.2f seconds.", test.basename(), time_taken)
+
+ # Asynchronously closes the buildlogger test handler to avoid having too many threads open
+ # on 32-bit systems.
+ logging.flush.close_later(test.logger)
+
+ # Restore the original logger for the test.
+ test.logger = self.__original_loggers.pop(test.id())
+
+ def addError(self, test, err):
+ """
+ Called when a non-failureException was raised during the
+ execution of 'test'.
+ """
+
+ unittest.TestResult.addError(self, test, err)
+ self.num_errored += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "error"
+ test_info.return_code = test.return_code
+
+ def setError(self, test):
+ """
+ Used to change the outcome of an existing test to an error.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "error"
+ test_info.return_code = 2
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addFailure(self, test, err):
+ """
+ Called when a failureException was raised during the execution
+ of 'test'.
+ """
+
+ unittest.TestResult.addFailure(self, test, err)
+ self.num_failed += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "fail"
+ test_info.return_code = test.return_code
+
+ def setFailure(self, test, return_code=1):
+ """
+ Used to change the outcome of an existing test to a failure.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "fail"
+ test_info.return_code = return_code
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addSuccess(self, test):
+ """
+ Called when 'test' executed successfully.
+ """
+
+ unittest.TestResult.addSuccess(self, test)
+ self.num_succeeded += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "pass"
+ test_info.return_code = test.return_code
+
+ def wasSuccessful(self):
+ """
+ Returns true if all tests executed successfully.
+ """
+ return self.num_failed == self.num_errored == 0
+
+ def get_successful(self):
+ """
+ Returns the status and timing information of the tests that
+ executed successfully.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "pass"]
+
+ def get_failed(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "fail"]
+
+ def get_errored(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a non-failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "error"]
+
+ def as_dict(self):
+ """
+ Return the test result information as a dictionary.
+
+ Used to create the report.json file.
+ """
+
+ results = []
+ for test_info in self.test_infos:
+ # Don't distinguish between failures and errors.
+ status = "pass" if test_info.status == "pass" else "fail"
+
+ result = {
+ "test_file": test_info.test_id,
+ "status": status,
+ "exit_code": test_info.return_code,
+ "start": test_info.start_time,
+ "end": test_info.end_time,
+ "elapsed": test_info.end_time - test_info.start_time,
+ }
+
+ if test_info.url_endpoint is not None:
+ result["url"] = test_info.url_endpoint
+
+ results.append(result)
+
+ return {
+ "results": results,
+ "failures": self.num_failed + self.num_errored,
+ }
+
+ def reset(self):
+ """
+ Resets the test report back to its initial state.
+ """
+
+ self.test_infos = []
+
+ self.num_dynamic = 0
+ self.num_succeeded = 0
+ self.num_failed = 0
+ self.num_errored = 0
+
+ self.__original_loggers = {}
+
+ def _find_test_info(self, test):
+ """
+ Returns the status and timing information associated with
+ 'test'.
+ """
+
+ test_id = test.id()
+
+ # Search the list backwards to efficiently find the status and timing information of a test
+ # that was recently started.
+ for test_info in reversed(self.test_infos):
+ if test_info.test_id == test_id:
+ return test_info
+
+ raise ValueError("Details for %s not found in the report" % (test.basename()))
+
+
+class _TestInfo(object):
+ """
+ Holder for the test status and timing information.
+ """
+
+ def __init__(self, test_id, dynamic):
+ """
+ Initializes the _TestInfo instance.
+ """
+
+ self.test_id = test_id
+ self.dynamic = dynamic
+
+ self.start_time = None
+ self.end_time = None
+ self.status = None
+ self.return_code = None
+ self.url_endpoint = None
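+
+
+# Illustrative sketch: the shape of the report.json payload produced by
+# TestReport.as_dict() for a two-test run (all values are made up).
+_EXAMPLE_REPORT = {
+    "results": [
+        {"test_file": "jstests/core/find.js", "status": "pass",
+         "exit_code": 0, "start": 1456179292.5, "end": 1456179295.0,
+         "elapsed": 2.5},
+        {"test_file": "jstests/core/sort.js", "status": "fail",
+         "exit_code": 1, "start": 1456179295.1, "end": 1456179297.8,
+         "elapsed": 2.7},
+    ],
+    "failures": 1,
+}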
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/suite.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/suite.py
new file mode 100644
index 00000000000..65503b85e8b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/suite.py
@@ -0,0 +1,140 @@
+"""
+Holder for a set of TestGroup instances.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+from . import testgroup
+from .. import selector as _selector
+
+
+class Suite(object):
+ """
+ A suite of tests.
+ """
+
+ TESTS_ORDER = ("cpp_unit_test", "cpp_integration_test", "db_test", "js_test", "mongos_test")
+
+ def __init__(self, suite_name, suite_config):
+ """
+ Initializes the suite with the specified name and configuration.
+ """
+
+ self._suite_name = suite_name
+ self._suite_config = suite_config
+
+ self.test_groups = []
+ for test_kind in Suite.TESTS_ORDER:
+ if test_kind not in suite_config["selector"]:
+ continue
+ tests = self._get_tests_for_group(test_kind)
+ test_group = testgroup.TestGroup(test_kind, tests)
+ self.test_groups.append(test_group)
+
+ self.return_code = None
+
+ self._start_time = None
+ self._end_time = None
+
+ def _get_tests_for_group(self, test_kind):
+ """
+ Returns the tests to run based on the 'test_kind'-specific
+ filtering policy.
+ """
+
+ test_info = self.get_selector_config()[test_kind]
+
+        # The mongos_test doesn't have to filter anything; the test_info is just the arguments to
+ # the mongos program to be used as the test case.
+ if test_kind == "mongos_test":
+ mongos_options = test_info # Just for easier reading.
+ if not isinstance(mongos_options, dict):
+ raise TypeError("Expected dictionary of arguments to mongos")
+ return [mongos_options]
+ elif test_kind == "cpp_integration_test":
+ tests = _selector.filter_cpp_integration_tests(**test_info)
+ elif test_kind == "cpp_unit_test":
+ tests = _selector.filter_cpp_unit_tests(**test_info)
+ elif test_kind == "db_test":
+ tests = _selector.filter_dbtests(**test_info)
+ else: # test_kind == "js_test":
+ tests = _selector.filter_jstests(**test_info)
+
+ return sorted(tests, key=str.lower)
+
+ def get_name(self):
+ """
+ Returns the name of the test suite.
+ """
+ return self._suite_name
+
+ def get_selector_config(self):
+ """
+ Returns the "selector" section of the YAML configuration.
+ """
+ return self._suite_config["selector"]
+
+ def get_executor_config(self):
+ """
+ Returns the "executor" section of the YAML configuration.
+ """
+ return self._suite_config["executor"]
+
+ def record_start(self):
+ """
+ Records the start time of the suite.
+ """
+ self._start_time = time.time()
+
+ def record_end(self):
+ """
+ Records the end time of the suite.
+
+        Sets the 'return_code' of the suite based on the return codes of
+ each of the individual test groups.
+ """
+
+ self._end_time = time.time()
+
+ # Only set 'return_code' if it hasn't been set already. It may have been set if there was
+ # an exception that happened during the execution of the suite.
+ if self.return_code is None:
+ # The return code of the suite should be 2 if any test group has a return code of 2.
+ # The return code of the suite should be 1 if any test group has a return code of 1,
+ # and none have a return code of 2. Otherwise, the return code should be 0.
+ self.return_code = max(test_group.return_code for test_group in self.test_groups)
+
+ def summarize(self, sb):
+ """
+ Appends a summary of each individual test group onto the string
+ builder 'sb'.
+ """
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ summarized_groups = []
+ for group in self.test_groups:
+ group_sb = []
+ summary = group.summarize(group_sb)
+ summarized_groups.append(" %ss: %s" % (group.test_kind, "\n ".join(group_sb)))
+
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ if combined_summary.num_run == 0:
+ sb.append("Suite did not run any tests.")
+ return
+
+ # Override the 'time_taken' attribute of the summary if we have more accurate timing
+ # information available.
+ if self._start_time is not None and self._end_time is not None:
+ time_taken = self._end_time - self._start_time
+ combined_summary = combined_summary._replace(time_taken=time_taken)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % combined_summary)
+
+ for summary_text in summarized_groups:
+ sb.append(summary_text)
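+
+
+# Illustrative sketch: record_end() above relies on the return codes
+# being ordered by severity (0 = success, 1 = test failure, 2 = server
+# failure), so max() picks the most severe outcome across test groups.
+def _sketch_suite_return_code(group_return_codes):
+    return max(group_return_codes)  # e.g. max([0, 2, 1]) == 2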
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/summary.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/summary.py
new file mode 100644
index 00000000000..1dae9ca81d6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/summary.py
@@ -0,0 +1,22 @@
+"""
+Holder for summary information about a test group or suite.
+"""
+
+from __future__ import absolute_import
+
+import collections
+
+
+Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
+ "num_skipped", "num_failed", "num_errored"])
+
+
+def combine(summary1, summary2):
+ """
+ Returns a summary representing the sum of 'summary1' and 'summary2'.
+ """
+ args = []
+ for i in xrange(len(Summary._fields)):
+ args.append(summary1[i] + summary2[i])
+ return Summary._make(args)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
new file mode 100644
index 00000000000..3b068c3b80f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
@@ -0,0 +1,407 @@
+"""
+Subclasses of unittest.TestCase.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import unittest
+
+from .. import config
+from .. import core
+from .. import logging
+from .. import utils
+
+
+def make_test_case(test_kind, *args, **kwargs):
+ """
+ Factory function for creating TestCase instances.
+ """
+
+ if test_kind not in _TEST_CASES:
+ raise ValueError("Unknown test kind '%s'" % (test_kind))
+ return _TEST_CASES[test_kind](*args, **kwargs)
+
+
+class TestCase(unittest.TestCase):
+ """
+ A test case to execute.
+ """
+
+ def __init__(self, logger, test_kind, test_name):
+ """
+ Initializes the TestCase with the name of the test.
+ """
+
+ unittest.TestCase.__init__(self, methodName="run_test")
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(test_kind, basestring):
+ raise TypeError("test_kind must be a string")
+
+ if not isinstance(test_name, basestring):
+ raise TypeError("test_name must be a string")
+
+ self.logger = logger
+ self.test_kind = test_kind
+ self.test_name = test_name
+
+ self.fixture = None
+ self.return_code = None
+
+ def long_name(self):
+ """
+ Returns the path to the test, relative to the current working directory.
+ """
+ return os.path.relpath(self.test_name)
+
+ def basename(self):
+ """
+ Returns the basename of the test.
+ """
+ return os.path.basename(self.test_name)
+
+ def short_name(self):
+ """
+ Returns the basename of the test without the file extension.
+ """
+ return os.path.splitext(self.basename())[0]
+
+ def id(self):
+ return self.test_name
+
+ def shortDescription(self):
+ return "%s %s" % (self.test_kind, self.test_name)
+
+ def configure(self, fixture):
+ """
+ Stores 'fixture' as an attribute for later use during execution.
+ """
+ self.fixture = fixture
+
+ def run_test(self):
+ """
+ Runs the specified test.
+ """
+ raise NotImplementedError("run_test must be implemented by TestCase subclasses")
+
+ def as_command(self):
+ """
+ Returns the command invocation used to run the test.
+ """
+ return self._make_process().as_command()
+
+ def _execute(self, process):
+ """
+ Runs the specified process.
+ """
+
+ self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
+ process.start()
+ self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
+
+ self.return_code = process.wait()
+ if self.return_code != 0:
+ raise self.failureException("%s failed" % (self.shortDescription()))
+
+ self.logger.info("%s finished.", self.shortDescription())
+
+ def _make_process(self):
+ """
+ Returns a new Process instance that could be used to run the
+ test or log the command.
+ """
+ raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
+
+
+class CPPUnitTestCase(TestCase):
+ """
+ A C++ unit test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPUnitTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.process.Process(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class CPPIntegrationTestCase(TestCase):
+ """
+ A C++ integration test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPIntegrationTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ self.program_options["connectionString"] = self.fixture.get_connection_string()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ integration test %s.",
+ self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.generic_program(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class DBTestCase(TestCase):
+ """
+ A dbtest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ dbtest_suite,
+ dbtest_executable=None,
+ dbtest_options=None):
+ """
+ Initializes the DBTestCase with the dbtest suite to run.
+ """
+
+ TestCase.__init__(self, logger, "DBTest", dbtest_suite)
+
+ # Command line options override the YAML configuration.
+ self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)
+
+ self.dbtest_suite = dbtest_suite
+ self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ # If a dbpath was specified, then use it as a container for all other dbpaths.
+ dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
+ dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
+ self.dbtest_options["dbpath"] = dbpath
+
+ shutil.rmtree(dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def run_test(self):
+ try:
+ dbtest = self._make_process()
+ self._execute(dbtest)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.dbtest_program(self.logger,
+ executable=self.dbtest_executable,
+ suites=[self.dbtest_suite],
+ **self.dbtest_options)
+
+ @staticmethod
+ def _get_dbpath_prefix():
+ """
+ Returns the prefix of the dbpath to use for the dbtest
+ executable.
+
+ Order of preference:
+ 1. The --dbpathPrefix specified at the command line.
+ 2. Value of the TMPDIR environment variable.
+ 3. Value of the TEMP environment variable.
+ 4. Value of the TMP environment variable.
+ 5. The /tmp directory.
+ """
+
+ if config.DBPATH_PREFIX is not None:
+ return config.DBPATH_PREFIX
+
+ for env_var in ("TMPDIR", "TEMP", "TMP"):
+ if env_var in os.environ:
+ return os.environ[env_var]
+ return os.path.normpath("/tmp")
+
+
+class JSTestCase(TestCase):
+ """
+ A jstest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ js_filename,
+ shell_executable=None,
+ shell_options=None):
+        """
+        Initializes the JSTestCase with the JS file to run.
+        """
+
+ TestCase.__init__(self, logger, "JSTest", js_filename)
+
+ # Command line options override the YAML configuration.
+ self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
+
+ self.js_filename = js_filename
+ self.shell_options = utils.default_if_none(shell_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ if self.fixture.port is not None:
+ self.shell_options["port"] = self.fixture.port
+
+ global_vars = self.shell_options.get("global_vars", {}).copy()
+ data_dir = self._get_data_dir(global_vars)
+
+ # Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
+ if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
+ # dataPath property is the dataDir property with a trailing slash.
+ data_path = os.path.join(data_dir, "")
+ else:
+ data_path = global_vars["MongoRunner.dataPath"]
+
+ global_vars["MongoRunner.dataDir"] = data_dir
+ global_vars["MongoRunner.dataPath"] = data_path
+
+ test_data = global_vars.get("TestData", {}).copy()
+ test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
+ test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
+
+ global_vars["TestData"] = test_data
+ self.shell_options["global_vars"] = global_vars
+
+ shutil.rmtree(data_dir, ignore_errors=True)
+
+ try:
+ os.makedirs(data_dir)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def _get_data_dir(self, global_vars):
+ """
+ Returns the value that the mongo shell should set for the
+ MongoRunner.dataDir property.
+ """
+
+ # Command line options override the YAML configuration.
+ data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
+ global_vars.get("MongoRunner.dataDir"))
+ data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
+ return os.path.join(data_dir_prefix,
+ "job%d" % (self.fixture.job_num),
+ config.MONGO_RUNNER_SUBDIR)
+
+ def run_test(self):
+ try:
+ shell = self._make_process()
+ self._execute(shell)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running jstest %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongo_shell_program(self.logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ **self.shell_options)
+
+
+class MongosTestCase(TestCase):
+ """
+ A TestCase which runs a mongos binary with the given parameters.
+ """
+
+ def __init__(self,
+ logger,
+ mongos_options):
+ """
+ Initializes the mongos test and saves the options.
+ """
+
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
+ config.DEFAULT_MONGOS_EXECUTABLE)
+ # Use the executable as the test name.
+ TestCase.__init__(self, logger, "mongos", self.mongos_executable)
+ self.options = mongos_options.copy()
+
+ def configure(self, fixture):
+ """
+ Ensures the --test option is present in the mongos options.
+ """
+
+ TestCase.configure(self, fixture)
+ # Always specify test option to ensure the mongos will terminate.
+ if "test" not in self.options:
+ self.options["test"] = ""
+
+ def run_test(self):
+ try:
+ mongos = self._make_process()
+ self._execute(mongos)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running %s.", mongos.as_command())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.options)
+
+
+_TEST_CASES = {
+ "cpp_unit_test": CPPUnitTestCase,
+ "cpp_integration_test": CPPIntegrationTestCase,
+ "db_test": DBTestCase,
+ "js_test": JSTestCase,
+ "mongos_test": MongosTestCase,
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
new file mode 100644
index 00000000000..688d56c296d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
@@ -0,0 +1,132 @@
+"""
+Holder for the (test kind, list of tests) pair with additional metadata
+about when and how they execute.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+
+
+class TestGroup(object):
+ """
+ A class to encapsulate the results of running a group of tests
+ of a particular kind (e.g. C++ unit tests, dbtests, jstests).
+ """
+
+ def __init__(self, test_kind, tests):
+ """
+ Initializes the TestGroup with a list of tests.
+ """
+
+ self.test_kind = test_kind
+ self.tests = tests
+
+ self.return_code = None # Set by the executor.
+
+ self._start_times = []
+ self._end_times = []
+ self._reports = []
+
+ def get_reports(self):
+ """
+ Returns the list of reports.
+ """
+ return self._reports
+
+ def record_start(self):
+ """
+ Records the start time of an execution.
+ """
+ self._start_times.append(time.time())
+
+ def record_end(self, report):
+ """
+ Records the end time of an execution.
+ """
+ self._end_times.append(time.time())
+ self._reports.append(report)
+
+ def summarize_latest(self, sb):
+ """
+ Returns a summary of the latest execution of the group and appends a
+ summary of that execution onto the string builder 'sb'.
+ """
+ return self._summarize_execution(-1, sb)
+
+ def summarize(self, sb):
+ """
+ Returns a summary of the execution(s) of the group and appends a
+ summary of the execution(s) onto the string builder 'sb'.
+ """
+
+ if not self._reports:
+ sb.append("No tests ran.")
+ return _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ if len(self._reports) == 1:
+ return self._summarize_execution(0, sb)
+
+ return self._summarize_repeated(sb)
+
+ def _summarize_repeated(self, sb):
+ """
+ Returns the summary information of all executions and appends
+ each execution's summary onto the string builder 'sb'. Also
+        appends information about how many repetitions there were.
+ """
+
+ num_iterations = len(self._reports)
+ total_time_taken = self._end_times[-1] - self._start_times[0]
+ sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+ for iteration in xrange(num_iterations):
+ # Summarize each execution as a bulleted list of results.
+ bulleter_sb = []
+ summary = self._summarize_execution(iteration, bulleter_sb)
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ for (i, line) in enumerate(bulleter_sb):
+ # Only bullet first line, indent others.
+ prefix = "* " if i == 0 else " "
+ sb.append(prefix + line)
+
+ return combined_summary
+
+ def _summarize_execution(self, iteration, sb):
+ """
+ Returns the summary information of the execution given by
+ 'iteration' and appends a summary of that execution onto the
+ string builder 'sb'.
+ """
+
+ report = self._reports[iteration]
+ time_taken = self._end_times[iteration] - self._start_times[iteration]
+
+ num_run = report.num_succeeded + report.num_errored + report.num_failed
+ num_skipped = len(self.tests) + report.num_dynamic - num_run
+
+ if report.num_succeeded == num_run and num_skipped == 0:
+ sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
+ return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
+
+ summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
+ report.num_failed, report.num_errored)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
+
+ if report.num_failed > 0:
+ sb.append("The following tests failed (with exit code):")
+ for test_info in report.get_failed():
+ sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
+
+ if report.num_errored > 0:
+ sb.append("The following tests had errors:")
+ for test_info in report.get_errored():
+ sb.append(" %s" % (test_info.test_id))
+
+ return summary
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
new file mode 100644
index 00000000000..df387cc3323
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
@@ -0,0 +1,88 @@
+"""
+Helper functions.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+import yaml
+
+
+def default_if_none(value, default):
+ return value if value is not None else default
+
+
+def is_string_list(lst):
+ """
+ Returns true if 'lst' is a list of strings, and false otherwise.
+ """
+ return isinstance(lst, list) and all(isinstance(x, basestring) for x in lst)
+
+
+def is_string_set(value):
+ """
+ Returns true if 'value' is a set of strings, and false otherwise.
+ """
+ return isinstance(value, set) and all(isinstance(x, basestring) for x in value)
+
+
+def is_js_file(filename):
+ """
+ Returns true if 'filename' ends in .js, and false otherwise.
+ """
+ return os.path.splitext(filename)[1] == ".js"
+
+
+def is_yaml_file(filename):
+ """
+ Returns true if 'filename' ends in .yml or .yaml, and false
+ otherwise.
+ """
+ return os.path.splitext(filename)[1] in (".yaml", ".yml")
+
+
+def load_yaml_file(filename):
+ """
+ Attempts to read 'filename' as YAML.
+ """
+ try:
+ with open(filename, "r") as fp:
+ return yaml.safe_load(fp)
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid YAML: %s" % (filename, err))
+
+
+def dump_yaml(value):
+ """
+ Returns 'value' formatted as YAML.
+ """
+ # Use block (indented) style for formatting YAML.
+ return yaml.safe_dump(value, default_flow_style=False).rstrip()
+
+
+def load_yaml(value):
+ """
+ Attempts to parse 'value' as YAML.
+ """
+ try:
+ return yaml.safe_load(value)
+ except yaml.YAMLError as err:
+ raise ValueError("Attempted to parse invalid YAML value '%s': %s" % (value, err))
+
+
+def new_mongo_client(port, read_preference=pymongo.ReadPreference.PRIMARY, timeout_millis=30000):
+ """
+ Returns a pymongo.MongoClient connected on 'port' with a read
+ preference of 'read_preference'.
+
+ The PyMongo driver will wait up to 'timeout_millis' milliseconds
+ before concluding that the server is unavailable.
+ """
+
+ kwargs = {"connectTimeoutMS": timeout_millis}
+ if pymongo.version_tuple[0] >= 3:
+ kwargs["serverSelectionTimeoutMS"] = timeout_millis
+ kwargs["connect"] = True
+
+ return pymongo.MongoClient(port=port, read_preference=read_preference, **kwargs)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
new file mode 100644
index 00000000000..644ebfe3e38
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
@@ -0,0 +1,202 @@
+"""
+Filename globbing utility.
+"""
+
+from __future__ import absolute_import
+
+import glob as _glob
+import os
+import os.path
+import re
+
+
+_GLOBSTAR = "**"
+_CONTAINS_GLOB_PATTERN = re.compile("[*?[]")
+
+
+def is_glob_pattern(s):
+ """
+ Returns true if 's' represents a glob pattern, and false otherwise.
+ """
+
+ # Copied from glob.has_magic().
+ return _CONTAINS_GLOB_PATTERN.search(s) is not None
+
+
+def glob(globbed_pathname):
+ """
+ Return a list of pathnames matching the 'globbed_pathname' pattern.
+
+ In addition to containing simple shell-style wildcards a la fnmatch,
+    the pattern may also contain globstars ("**"), which are recursively
+    expanded to match zero or more subdirectories.
+ """
+
+ return list(iglob(globbed_pathname))
+
+
+def iglob(globbed_pathname):
+ """
+    Emit pathnames matching the 'globbed_pathname' pattern.
+
+    In addition to containing simple shell-style wildcards a la fnmatch,
+    the pattern may also contain globstars ("**"), which are recursively
+    expanded to match zero or more subdirectories.
+ """
+
+ parts = _split_path(globbed_pathname)
+ parts = _canonicalize(parts)
+
+ index = _find_globstar(parts)
+ if index == -1:
+ for pathname in _glob.iglob(globbed_pathname):
+ # Normalize 'pathname' so exact string comparison can be used later.
+ yield os.path.normpath(pathname)
+ return
+
+ # **, **/, or **/a
+ if index == 0:
+ expand = _expand_curdir
+
+ # a/** or a/**/ or a/**/b
+ else:
+ expand = _expand
+
+ prefix_parts = parts[:index]
+ suffix_parts = parts[index + 1:]
+
+ prefix = os.path.join(*prefix_parts) if prefix_parts else os.curdir
+ suffix = os.path.join(*suffix_parts) if suffix_parts else ""
+
+ for (kind, path) in expand(prefix):
+ if not suffix_parts:
+ yield path
+
+        # Avoid following symlinks to prevent an infinite loop.
+ elif suffix_parts and kind == "dir" and not os.path.islink(path):
+ path = os.path.join(path, suffix)
+ for pathname in iglob(path):
+ yield pathname
+
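+# Illustrative sketch (the path and layout are hypothetical): with files
+# such as jstests/core/find.js and jstests/core/sub/sort.js on disk,
+# "jstests/**/*.js" matches .js files at any depth below jstests,
+# including zero directories deep.
+def _sketch_globstar():
+    for pathname in iglob("jstests/**/*.js"):
+        print(pathname)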
+
+def _split_path(pathname):
+ """
+ Return 'pathname' as a list of path components.
+ """
+
+ parts = []
+
+ while True:
+ (dirname, basename) = os.path.split(pathname)
+ parts.append(basename)
+ if pathname == dirname:
+ parts.append(dirname)
+ break
+ if not dirname:
+ break
+ pathname = dirname
+
+ parts.reverse()
+ return parts
+
+
+def _canonicalize(parts):
+ """
+ Return a copy of 'parts' with consecutive "**"s coalesced.
+ Raise a ValueError for unsupported uses of "**".
+ """
+
+ res = []
+
+ prev_was_globstar = False
+ for p in parts:
+ if p == _GLOBSTAR:
+ # Skip consecutive **'s
+ if not prev_was_globstar:
+ prev_was_globstar = True
+ res.append(p)
+ elif _GLOBSTAR in p: # a/b**/c or a/**b/c
+ raise ValueError("Can only specify glob patterns of the form a/**/b")
+ else:
+ prev_was_globstar = False
+ res.append(p)
+
+ return res
+
+
+def _find_globstar(parts):
+ """
+ Return the index of the first occurrence of "**" in 'parts'.
+ Return -1 if "**" is not found in the list.
+ """
+
+ for (i, p) in enumerate(parts):
+ if p == _GLOBSTAR:
+ return i
+ return -1
+
+
+def _list_dir(pathname):
+ """
+ Return a pair of the subdirectory names and filenames immediately
+ contained within the 'pathname' directory.
+
+ If 'pathname' does not exist, then None is returned.
+ """
+
+ try:
+ (_root, dirs, files) = os.walk(pathname).next()
+ return (dirs, files)
+ except StopIteration:
+ return None # 'pathname' directory does not exist
+
+
+def _expand(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+    # Zero expansion: "**" may match zero directories, so yield 'pathname' itself.
+ if os.path.basename(pathname):
+ yield ("dir", os.path.join(pathname, ""))
+
+ for f in files:
+ path = os.path.join(pathname, f)
+ yield ("file", path)
+
+ for d in dirs:
+ path = os.path.join(pathname, d)
+ for x in _expand(path):
+ yield x
+
+
+def _expand_curdir(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+
+ The returned pathnames omit a "./" prefix.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+    # Zero expansion: "**" may match zero directories, so yield the current directory.
+ yield ("dir", "")
+
+ for f in files:
+ yield ("file", f)
+
+ for d in dirs:
+ for x in _expand(d):
+ yield x
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
new file mode 100644
index 00000000000..18da7885820
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
@@ -0,0 +1,78 @@
+"""
+Utility for parsing JS comments.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+import yaml
+
+
+# TODO: use a more robust regular expression for matching tags
+_JSTEST_TAGS_RE = re.compile(r".*@tags\s*:\s*(\[[^\]]*\])", re.DOTALL)
+
+
+def get_tags(pathname):
+ """
+ Returns the list of tags found in the (JS-style) comments of
+ 'pathname'. The definition can span multiple lines, use unquoted,
+ single-quoted, or double-quoted strings, and use the '#' character
+ for inline commenting.
+
+ e.g.
+
+ /**
+ * @tags: [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+ */
+ """
+
+ with open(pathname) as fp:
+ match = _JSTEST_TAGS_RE.match(fp.read())
+ if match:
+ try:
+ # TODO: it might be worth supporting the block (indented) style of YAML lists in
+ # addition to the flow (bracketed) style
+ tags = yaml.safe_load(_strip_jscomments(match.group(1)))
+                if (not isinstance(tags, list) or
+                        not all(isinstance(tag, basestring) for tag in tags)):
+ raise TypeError("Expected a list of string tags, but got '%s'" % (tags))
+ return tags
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid tags (expected YAML): %s"
+ % (pathname, err))
+
+ return []
+
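+# Illustrative sketch (the path is hypothetical): parsing the @tags
+# annotation of a test file.
+def _sketch_get_tags():
+    # Returns e.g. ["tag1", "tag2", ...] for a file annotated as in the
+    # get_tags() docstring above, or [] when no @tags comment is present.
+    return get_tags("jstests/core/example.js")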
+
+def _strip_jscomments(s):
+ """
+ Given a string 's' that represents the contents after the "@tags:"
+ annotation in the JS file, this function returns a string that can
+ be converted to YAML.
+
+ e.g.
+
+ [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+
+    If //-style JS comments were used, then the example remains the
+    same, except that the '*' character is replaced by '//'.
+ """
+
+ yaml_lines = []
+
+ for line in s.splitlines():
+ # Remove leading whitespace and symbols that commonly appear in JS comments.
+ line = line.lstrip("\t ").lstrip("*/")
+ yaml_lines.append(line)
+
+ return "\n".join(yaml_lines)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/queue.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/queue.py
new file mode 100644
index 00000000000..80da5e2cc66
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/queue.py
@@ -0,0 +1,52 @@
+"""
+Extension to the Queue.Queue class.
+
+Added support for the join() method to take a timeout. This is necessary
+in order for KeyboardInterrupt exceptions to get propagated.
+
+See https://bugs.python.org/issue1167930 for more details.
+"""
+
+from __future__ import absolute_import
+
+import Queue
+import time
+
+
+# Exception that is raised when get_nowait() is called on an empty Queue.
+Empty = Queue.Empty
+
+
+class Queue(Queue.Queue):
+ """
+ A multi-producer, multi-consumer queue.
+ """
+
+ def join(self, timeout=None):
+ """
+ Wait until all items in the queue have been retrieved and processed,
+ or until 'timeout' seconds have passed.
+
+ The count of unfinished tasks is incremented whenever an item is added
+ to the queue. The count is decremented whenever task_done() is called
+ to indicate that all work on the retrieved item was completed.
+
+ When the number of unfinished tasks reaches zero, True is returned.
+ If the number of unfinished tasks remains nonzero after 'timeout'
+ seconds have passed, then False is returned.
+ """
+ with self.all_tasks_done:
+ if timeout is None:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+ elif timeout < 0:
+ raise ValueError("timeout must be a nonnegative number")
+ else:
+                # Compute a deadline and wait on the condition until it passes.
+ deadline = time.time() + timeout
+ while self.unfinished_tasks:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ return False
+ self.all_tasks_done.wait(remaining)
+ return True
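+
+
+# Illustrative sketch: the main thread polls join() with a timeout in a
+# loop, which gives KeyboardInterrupt a chance to propagate between
+# waits (the one-second interval is arbitrary).
+def _sketch_join_with_interrupts(queue):
+    while not queue.join(timeout=1.0):
+        pass  # Unfinished tasks remain; keep waiting.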
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/timer.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/timer.py
new file mode 100644
index 00000000000..80531d5db5c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/resmokelib/utils/timer.py
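+
+
+# Illustrative sketch: combining two summaries adds the fields pairwise
+# (all numbers are made up).
+def _sketch_combine():
+    first = Summary(num_run=10, time_taken=12.5, num_succeeded=9,
+                    num_skipped=0, num_failed=1, num_errored=0)
+    second = Summary(num_run=4, time_taken=3.0, num_succeeded=4,
+                     num_skipped=0, num_failed=0, num_errored=0)
+    total = combine(first, second)
+    assert total == Summary(num_run=14, time_taken=15.5, num_succeeded=13,
+                            num_skipped=0, num_failed=1, num_errored=0)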
@@ -0,0 +1,125 @@
+"""
+Alternative to the threading.Timer class.
+
+Enables a timer to be restarted without needing to construct a new thread
+each time. This is necessary to execute periodic actions, e.g. flushing
+log messages to buildlogger, while avoiding errors related to "can't start
+new thread" that would otherwise occur on Windows.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class AlarmClock(threading.Thread):
+ """
+ Calls a function after a specified number of seconds.
+ """
+
+ def __init__(self, interval, func, args=None, kwargs=None):
+ """
+ Initializes the timer with a function to periodically execute.
+ """
+
+ threading.Thread.__init__(self)
+
+ # A non-dismissed timer should not prevent the program from exiting
+ self.daemon = True
+
+ self.interval = interval
+ self.func = func
+ self.args = args if args is not None else []
+ self.kwargs = kwargs if kwargs is not None else {}
+
+ self.lock = threading.Lock()
+ self.cond = threading.Condition(self.lock)
+
+ self.snoozed = False # canceled for one execution
+ self.dismissed = False # canceled for all time
+ self.restarted = False
+
+ def dismiss(self):
+ """
+ Disables the timer.
+ """
+
+ with self.lock:
+ self.dismissed = True
+ self.cond.notify_all()
+
+ self.join() # Tidy up the started thread.
+
+ cancel = dismiss # Expose API compatible with that of threading.Timer.
+
+ def snooze(self):
+ """
+ Skips the next execution of 'func' if it has not already started.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be snoozed if it has been dismissed")
+
+ self.snoozed = True
+ self.restarted = False
+ self.cond.notify_all()
+
+ def reset(self):
+ """
+ Restarts the timer, causing it to wait 'interval' seconds before calling
+ 'func' again.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be reset if it has been dismissed")
+
+ if not self.snoozed:
+ raise ValueError("Timer cannot be reset if it has not been snoozed")
+
+ self.restarted = True
+ self.cond.notify_all()
+
+ def run(self):
+ """
+ Repeatedly calls 'func' with a delay of 'interval' seconds between executions.
+
+ If the timer is snoozed before 'func' is called, then it waits to be reset.
+ After it has been reset, the timer will again wait 'interval' seconds and
+ then try to call 'func'.
+
+ If the timer is dismissed, then no subsequent executions of 'func' are made.
+ """
+
+ while True:
+ with self.lock:
+ if self.dismissed:
+ return
+
+ # Wait for the specified amount of time.
+ self.cond.wait(self.interval)
+
+ if self.dismissed:
+ return
+
+ # If the timer was snoozed, then it should wait to be reset.
+ if self.snoozed:
+ while not self.restarted:
+ self.cond.wait()
+
+ if self.dismissed:
+ return
+
+ self.restarted = False
+ self.snoozed = False
+ continue
+
+ # Execute the function after the lock has been released to prevent potential deadlocks
+ # with the invoked function.
+ self.func(*self.args, **self.kwargs)
+
+ # Reacquire the lock.
+ with self.lock:
+ # Ignore snoozes that took place while the function was being executed.
+ self.snoozed = False
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py
new file mode 100755
index 00000000000..d2df4cd4a0a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+import re
+import sys
+import os
+import tempfile
+import urllib2
+import subprocess
+import tarfile
+import zipfile
+import shutil
+import errno
+# Imported only to ensure gzip support exists on the system
+import gzip
+import argparse
+
+#
+# Useful script for installing multiple versions of MongoDB on a machine.
+# Only really tested on, and known to work on, Linux.
+#
+
+def version_tuple(version):
+ """Returns a version tuple that can be used for numeric sorting
+ of version strings such as '2.6.0-rc1' and '2.4.0'"""
+
+ RC_OFFSET = -100
+ version_parts = re.split(r'\.|-', version[0])
+
+ if version_parts[-1].startswith("rc"):
+ rc_part = version_parts.pop()
+ rc_part = rc_part.split('rc')[1]
+
+ # RC versions are weighted down to allow future RCs and general
+ # releases to be sorted in ascending order (e.g., 2.6.0-rc1,
+ # 2.6.0-rc2, 2.6.0).
+ version_parts.append(int(rc_part) + RC_OFFSET)
+ else:
+ # Non-RC releases have an extra 0 appended so version tuples like
+ # (2, 6, 0, -100) and (2, 6, 0, 0) sort in ascending order.
+ version_parts.append(0)
+
+ return tuple(map(int, version_parts))
+
+class MultiVersionDownloaderBase:
+
+ def download_version(self, version):
+
+ try:
+ os.makedirs(self.install_dir)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(self.install_dir):
+ pass
+ else: raise
+
+ url, full_version = self.gen_url(version)
+
+        # This extracts the filename portion of the URL, without the extension.
+        # For example: http://downloads.mongodb.org/osx/mongodb-osx-x86_64-2.4.12.tgz
+        # extract_dir will become mongodb-osx-x86_64-2.4.12
+ extract_dir = url.split("/")[-1][:-4]
+
+ # only download if we don't already have the directory
+ already_downloaded = os.path.isdir(os.path.join( self.install_dir, extract_dir))
+ if already_downloaded:
+ print "Skipping download for version %s (%s) since the dest already exists '%s'" \
+ % (version, full_version, extract_dir)
+ else:
+ temp_dir = tempfile.mkdtemp()
+ temp_file = tempfile.mktemp(suffix=".tgz")
+
+ print "Downloading data for version %s (%s) from %s..." % (version, full_version, url)
+
+ data = urllib2.urlopen(url)
+
+ with open(temp_file, 'wb') as f:
+ f.write(data.read())
+ print "Uncompressing data for version %s (%s)..." % (version, full_version)
+
+ try:
+ tf = tarfile.open(temp_file, 'r:gz')
+ tf.extractall(path=temp_dir)
+ tf.close()
+ except:
+ # support for windows
+ zfile = zipfile.ZipFile(temp_file)
+ try:
+ if not os.path.exists(temp_dir):
+ os.makedirs(temp_dir)
+ for name in zfile.namelist():
+ _, filename = os.path.split(name)
+ print "Decompressing " + filename + " on " + temp_dir
+ zfile.extract(name, temp_dir)
+ except:
+ zfile.close()
+ raise
+ zfile.close()
+ temp_install_dir = os.path.join(temp_dir, extract_dir)
+ try:
+ os.stat(temp_install_dir)
+ except:
+                entries = os.listdir(temp_dir)
+                # TODO confirm that there is one and only one directory entry
+                os.rename(os.path.join(temp_dir, entries[0]), temp_install_dir)
+ shutil.move(temp_install_dir, self.install_dir)
+ shutil.rmtree(temp_dir)
+ try:
+ os.remove(temp_file)
+ except Exception as e:
+ print e
+ self.symlink_version(version, os.path.abspath(os.path.join(self.install_dir, extract_dir)))
+
+ def symlink_version(self, version, installed_dir):
+
+ try:
+ os.makedirs(self.link_dir)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(self.link_dir):
+ pass
+ else: raise
+
+ for executable in os.listdir(os.path.join(installed_dir, "bin")):
+ link_name = "%s-%s" % (executable, version)
+ # support for windows
+ if executable.endswith(".exe") or executable.endswith(".pdb"):
+                link_name = "%s-%s.%s" % (executable[:-4], version, executable[-3:])
+
+ try:
+ os.symlink(os.path.join(installed_dir, "bin", executable),\
+ os.path.join(self.link_dir, link_name))
+ except Exception as exc:
+ try:
+ # support for windows
+ shutil.copy2(os.path.join(installed_dir, "bin", executable),\
+ os.path.join(self.link_dir, link_name))
+ except:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+class MultiVersionDownloader(MultiVersionDownloaderBase):
+
+ def __init__(self, install_dir, link_dir, platform):
+ self.install_dir = install_dir
+ self.link_dir = link_dir
+        match = re.compile(r"(.*)/(.*)").match(platform)
+ self.platform = match.group(1)
+ self.arch = match.group(2)
+ self._links = None
+
+ @property
+ def links(self):
+ if self._links is None:
+ self._links = self.download_links()
+ return self._links
+
+ def gen_url(self, version):
+ urls = []
+ for link_version, link_url in self.links.iteritems():
+ if link_version.startswith(version):
+ # If we have a "-" in our version, exact match only
+ if version.find("-") >= 0:
+ if link_version != version: continue
+ elif link_version.find("-") >= 0:
+ continue
+
+ urls.append((link_version, link_url))
+
+ if len(urls) == 0:
+ raise Exception("Cannot find a link for version %s, versions %s found." \
+ % (version, self.links))
+
+ urls.sort(key=version_tuple)
+ full_version = urls[-1][0]
+ url = urls[-1][1]
+ return url, full_version
+
+ def download_links(self):
+ href = "http://dl.mongodb.org/dl/%s/%s" \
+ % (self.platform, self.arch)
+
+ html = urllib2.urlopen(href).read()
+ links = {}
+ for line in html.split():
+ match = None
+ for ext in ["tgz", "zip"]:
+ match = re.compile("http:\/\/downloads\.mongodb\.org\/%s/mongodb-%s-%s-([^\"]*)\.%s" \
+ % (self.platform, self.platform, self.arch, ext)).search(line)
+ if match != None:
+ break
+
+ if match == None:
+ continue
+ link = match.group(0)
+ version = match.group(1)
+ links[version] = link
+
+ return links
+
+
+class LatestMultiVersionDownloader(MultiVersionDownloaderBase):
+
+ def __init__(self, install_dir, link_dir, platform, use_ssl, os):
+ self.install_dir = install_dir
+ self.link_dir = link_dir
+        match = re.compile(r"(.*)/(.*)").match(platform)
+ self.platform = match.group(1)
+ self.arch = match.group(2)
+ self._links = None
+ self.use_ssl = use_ssl
+ self.os = os
+
+ def gen_url(self, version):
+ ext = "tgz"
+ if "win" in self.platform:
+ ext = "zip"
+ if self.use_ssl:
+ if version == "2.4":
+ enterprise_string = "subscription"
+ else:
+ enterprise_string = "enterprise"
+ full_version = self.os + "-v" + version + "-latest"
+ url = "http://downloads.10gen.com/%s/mongodb-%s-%s-%s-%s.%s" % ( self.platform, self.platform, self.arch, enterprise_string, full_version, ext )
+ else:
+ full_version = "v" + version + "-latest"
+ url = "http://downloads.mongodb.org/%s/mongodb-%s-%s-%s.%s" % ( self.platform, self.platform, self.arch, full_version, ext )
+ return url, full_version
+
+CL_HELP_MESSAGE = \
+"""
+Downloads and installs particular mongodb versions (each binary is renamed to include its version)
+into an install directory and symlinks the binaries with versions to another directory.
+
+Usage: setup_multiversion_mongodb.py INSTALL_DIR LINK_DIR PLATFORM_AND_ARCH VERSION1 [VERSION2 VERSION3 ...]
+
+Ex: setup_multiversion_mongodb.py ./install ./link "Linux/x86_64" "2.0.6" "2.0.3-rc0" "2.0" "2.2" "2.3"
+Ex: setup_multiversion_mongodb.py ./install ./link "OSX/x86_64" "2.4" "2.2"
+
+After running the script you will have a directory structure like this:
+./install/[mongodb-osx-x86_64-2.4.9, mongodb-osx-x86_64-2.2.7]
+./link/[mongod-2.4.9, mongod-2.2.7, mongo-2.4.9...]
+
+You should then add ./link/ to your path so multi-version tests will work.
+
+Note: If "rc" is included in the version name, we'll use the exact rc, otherwise we'll pull the highest non-rc
+version compatible with the version specified.
+"""
+
+def parse_cl_args(args):
+
+ parser = argparse.ArgumentParser(description=CL_HELP_MESSAGE)
+
+ def raise_exception(msg):
+ print CL_HELP_MESSAGE
+ raise Exception(msg)
+
+ parser.add_argument('install_dir', action="store" )
+ parser.add_argument('link_dir', action="store" )
+ parser.add_argument('platform_and_arch', action="store" )
+ parser.add_argument('--latest', action="store_true" )
+ parser.add_argument('--use-ssl', action="store_true" )
+ parser.add_argument('--os', action="store" )
+ parser.add_argument('version', action="store", nargs="+" )
+
+    args = parser.parse_args(args)
+
+    if re.compile(r".*/.*").match(args.platform_and_arch) is None:
+ raise_exception("PLATFORM_AND_ARCH isn't of the correct format")
+
+ if args.latest:
+ if not args.os:
+            raise_exception("using --latest requires an --os parameter")
+ return (LatestMultiVersionDownloader(args.install_dir, args.link_dir, args.platform_and_arch, args.use_ssl, args.os), args.version)
+ else:
+ if args.use_ssl:
+ raise_exception("you can only use --use-ssl when using --latest")
+ return (MultiVersionDownloader(args.install_dir, args.link_dir, args.platform_and_arch), args.version)
+
+def main():
+
+ downloader, versions = parse_cl_args(sys.argv[1:])
+
+ for version in versions:
+ downloader.download_version(version)
+
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/smoke.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/smoke.py
new file mode 100755
index 00000000000..d486042110e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/smoke.py
@@ -0,0 +1,1485 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test"),
+# don't take arguments for the dbpath, but unconditionally use
+# "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+import logging
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
+
+from buildscripts.resmokelib.core import pipe
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
+
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+    threads = threading.enumerate()
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not (small_oplog or small_oplog_rs):
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ self.job_object = None
+ self._inner_proc_pid = None
+ self._stdout_pipe = None
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+        except Exception, e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ else:
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ if self.kwargs.get('wiredtiger_engine_config_string'):
+ argv += ["--wiredTigerEngineConfigString", self.kwargs.get('wiredtiger_engine_config_string')]
+ if self.kwargs.get('wiredtiger_collection_config_string'):
+ argv += ["--wiredTigerCollectionConfigString", self.kwargs.get('wiredtiger_collection_config_string')]
+ if self.kwargs.get('wiredtiger_index_config_string'):
+ argv += ["--wiredTigerIndexConfigString", self.kwargs.get('wiredtiger_index_config_string')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'SCRAM-SHA-1')
+ if authMechanism != 'SCRAM-SHA-1':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+            argv += ['--clusterAuthMode', 'x509']
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ # If the mongod process is spawned under buildlogger.py, then the first line of output
+ # should include the pid of the underlying mongod process. If smoke.py didn't create its own
+ # job object because it is already inside one, then the pid is used to attempt to terminate
+ # the underlying mongod process.
+ first_line = self.proc.stdout.readline()
+ match = re.search("^\[buildlogger.py\] pid: (?P<pid>[0-9]+)$", first_line.rstrip())
+ if match is not None:
+ self._inner_proc_pid = int(match.group("pid"))
+ else:
+ # The first line of output didn't include the pid of the underlying mongod process. We
+ # write the first line of output to smoke.py's stdout to ensure the message doesn't get
+ # lost since it's possible that buildlogger.py isn't being used.
+ sys.stdout.write(first_line)
+
+ logger = logging.Logger("", level=logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(fmt="%(message)s"))
+ logger.addHandler(handler)
+
+ self._stdout_pipe = pipe.LoggerPipe(logger, logging.INFO, self.proc.stdout)
+ self._stdout_pipe.wait_until_started()
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find({}, ["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On Windows, this
+ method also assigns the started process to a job object if a new
+ one was created. This ensures that any child processes of this
+ process can be killed with a single call to TerminateJobObject
+ (see self.stop()).
+ """
+
+ creation_flags = 0
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+ import win32process
+
+ # Don't create a job object if the current process is already inside one.
+ if not win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ proc = Popen(argv, creationflags=creation_flags, stdout=PIPE, stderr=None, bufsize=0)
+
+ if self.job_object is not None:
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32" and self.job_object is not None:
+ # If smoke.py created its own job object, then we clean up the spawned processes by
+ # terminating it.
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif os.sys.platform == "win32":
+ # If smoke.py didn't create its own job object, then we attempt to clean up the
+ # spawned processes by terminating them individually.
+ import win32api
+ import win32con
+ import win32event
+ import win32process
+ import winerror
+
+ def win32_terminate(handle):
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python
+ # 2.7 because earlier versions do not catch exceptions.
+ try:
+ win32process.TerminateProcess(handle, -1)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has
+ # already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ # Terminate the mongod process underlying buildlogger.py if one exists.
+ if self._inner_proc_pid is not None:
+ # The PROCESS_TERMINATE privilege is necessary to call TerminateProcess() and
+ # the SYNCHRONIZE privilege is necessary to call WaitForSingleObject(). See
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx
+ # for more details.
+ required_access = win32con.PROCESS_TERMINATE | win32con.SYNCHRONIZE
+ inner_proc_handle = win32api.OpenProcess(required_access,
+ False,
+ self._inner_proc_pid)
+ try:
+ win32_terminate(inner_proc_handle)
+ win32event.WaitForSingleObject(inner_proc_handle, win32event.INFINITE)
+ finally:
+ win32api.CloseHandle(inner_proc_handle)
+
+ win32_terminate(self.proc._handle)
+ elif hasattr(self.proc, "terminate"):
+            # This method was added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+
+ if self._stdout_pipe is not None:
+ self._stdout_pipe.wait_until_finished()
+
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ # Fail hard if mongod terminates with an error. That might indicate that an
+ # instrumented build (e.g. LSAN) has detected an error. For now we aren't doing this on
+ # windows because the exit code seems to be unpredictable. We don't have LSAN there
+ # anyway.
+ retcode = self.proc.returncode
+ if os.sys.platform != "win32" and retcode != 0:
+ raise(Exception('mongod process exited with non-zero code %d' % retcode))
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+        self.status = args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ master.wait_for_repl()
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+                stats["error-docs"] = e
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+                mOplog = mTestDB.connection.local["oplog.$main"]
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary(b, l="true", r="false"):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog or small_oplog_rs: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+            return True
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("sharding", "copydb_from_mongos.js"), # SERVER-13080
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("jstests", "bench_test_insert.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ("core", "bench_test_insert.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+    result["mongod_running_at_start"] = mongod_is_up
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe"]:
+ argv = [path]
+ # default data directory for dbtest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+
+ if storage_engine:
+ argv.extend(["--storageEngine", storage_engine])
+ if wiredtiger_engine_config_string:
+ argv.extend(["--wiredTigerEngineConfigString", wiredtiger_engine_config_string])
+ if wiredtiger_collection_config_string:
+ argv.extend(["--wiredTigerCollectionConfigString", wiredtiger_collection_config_string])
+ if wiredtiger_index_config_string:
+ argv.extend(["--wiredTigerIndexConfigString", wiredtiger_index_config_string])
+
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+    if (argv[0].endswith('mongo') or argv[0].endswith('mongo.exe')) and '--eval' not in argv:
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.storageEngine = "' + ternary( storage_engine, storage_engine, "" ) + '";' + \
+ 'TestData.wiredTigerEngineConfigString = "' + ternary( wiredtiger_engine_config_string, wiredtiger_engine_config_string, "" ) + '";' + \
+ 'TestData.wiredTigerCollectionConfigString = "' + ternary( wiredtiger_collection_config_string, wiredtiger_collection_config_string, "" ) + '";' + \
+ 'TestData.wiredTigerIndexConfigString = "' + ternary( wiredtiger_index_config_string, wiredtiger_index_config_string, "" ) + '";' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ if os.getenv('SMOKE_EVAL') is not None:
+ evalString += os.getenv('SMOKE_EVAL')
+
+ argv = argv + [ '--eval', evalString]
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+            break
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+    is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+    if start_mongod and not is_mongod_still_up:
+        print "mongod is not running after test"
+        result["mongod_running_at_end"] = is_mongod_still_up
+        raise TestServerFailure(path)
+
+    result["mongod_running_at_end"] = is_mongod_still_up
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True,
+ small_oplog=True,
+ small_oplog_rs=False,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=True,
+ small_oplog=False,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+            primary = MongoClient(port=master.port)
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ # Wait for primary and secondary to finish initial sync and election
+ ismaster = False
+ while not ismaster:
+                result = primary.admin.command("ismaster")
+ ismaster = result["ismaster"]
+ if not ismaster:
+ print "waiting for primary to be available ..."
+ time.sleep(.2)
+
+ secondaryUp = False
+ sConn = MongoClient(port=slave.port,
+                                read_preference=ReadPreference.SECONDARY_PREFERRED)
+ while not secondaryUp:
+                result = sConn.admin.command("ismaster")
+ secondaryUp = result["secondary"]
+ if not secondaryUp:
+ print "waiting for secondary to be available ..."
+ time.sleep(.2)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+        print """The following collections have different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+                    print "Different Docs"
+                    print "Master docs:"
+                    pprint.pprint(stats["docs"]["master"], indent=2)
+                    print "Slave docs:"
+                    pprint.pprint(stats["docs"]["slave"], indent=2)
+                else:
+                    print "All docs matched!"
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+
+suiteGlobalConfig = { "files": ("files/*.js", False),
+ "restore": ("restore/*.js", False),
+ "stat": ("stat/*.js", False),
+ "top": ("top/*.js", False),
+ "bson": ("bson/*.js", False),
+ "export": ("export/*.js", False),
+ "dump": ("dump/*.js", False),
+ "oplog": ("oplog/*.js", False),
+ "import": ("import/*.js", False),
+ "ssl": ("ssl/*.js", False),
+ "unstable": ("unstable/*.js", False),
+ }
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'concurrency',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine, wiredtiger_engine_config_string, wiredtiger_collection_config_string, wiredtiger_index_config_string
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ wiredtiger_engine_config_string = options.wiredtiger_engine_config_string
+ wiredtiger_collection_config_string = options.wiredtiger_collection_config_string
+ wiredtiger_index_config_string = options.wiredtiger_index_config_string
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine, wiredtiger_engine_config_string, wiredtiger_collection_config_string, wiredtiger_index_config_string
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--wiredTigerEngineConfig', dest='wiredtiger_engine_config_string', default=None,
+ help='Wired Tiger configuration to pass through to mongod')
+ parser.add_option('--wiredTigerCollectionConfig', dest='wiredtiger_collection_config_string', default=None,
+ help='Wired Tiger collection configuration to pass through to mongod')
+ parser.add_option('--wiredTigerIndexConfig', dest='wiredtiger_index_config_string', default=None,
+ help='Wired Tiger index configuration to pass through to mongod')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='SCRAM-SHA-1',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+                      help='Check the failfile and re-run only the tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=(1 if 'detect_leaks=1' in os.getenv("ASAN_OPTIONS", "") else 20),
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+                      help='Adds --setParameter to mongod for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+                      help='Adds --setParameter to mongos for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+                      help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default: commands)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+ if options.ignore_files != None :
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/utils.py b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
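+# getAllSourceFiles recursively collects .cpp/.h/.c files under prefix,
+# skipping dot-directories and vendored trees (pcre-, mongodb-, debian, ...).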
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
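+# execsys runs a command (argv list or whitespace-separated string) and
+# returns its (stdout, stderr) pair.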
+def execsys( args ):
+ import subprocess
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+    return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
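+# which searches each directory on the platform's PATH for the executable and
+# returns its absolute path, falling back to the bare name if none is found.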
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
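+# find_python returns the running interpreter if it meets min_version,
+# otherwise probes common binary names and parses their `-V` output.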
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
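+# Illustrative only -- a hypothetical SConstruct hook (the alias name and
+# arguments below are assumptions, not part of this tree):
+#
+#   env.Alias('smoke', [], run_smoke_command('--mode=suite', 'js'))
+#   env.AlwaysBuild('smoke')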
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
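+# e.g. unicode_dammit('caf\xc3\xa9 \xff') == u'caf\xe9 \\xff' -- the stray
+# 0xff byte is replaced with its repr instead of raising UnicodeDecodeError.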
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types.js
new file mode 100644
index 00000000000..70d169685c8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types.js
@@ -0,0 +1,33 @@
+// This test runs bsondump on a .bson file containing non-deprecated BSON types
+// and makes sure their debug type values exist in the output.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--type=debug", "jstests/bson/testdata/all_types.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+
+ var results;
+ assert.eq.soon(22, function() {
+ results = rawMongoProgramOutput();
+ return (results.match(/--- new object ---/g) || []).length;
+ }, "should see all documents from the test data");
+
+ assert.strContains("type: 1", results, "bson type '1' should be present in the debug output");
+ assert.strContains("type: 2", results, "bson type '2' should be present in the debug output");
+ assert.strContains("type: 3", results, "bson type '3' should be present in the debug output");
+ assert.strContains("type: 4", results, "bson type '4' should be present in the debug output");
+ assert.strContains("type: 5", results, "bson type '5' should be present in the debug output");
+ assert.strContains("type: 6", results, "bson type '6' should be present in the debug output");
+ assert.strContains("type: 7", results, "bson type '7' should be present in the debug output");
+ assert.strContains("type: 8", results, "bson type '8' should be present in the debug output");
+ assert.strContains("type: 9", results, "bson type '9' should be present in the debug output");
+ assert.strContains("type: 10", results, "bson type '10' should be present in the debug output");
+ assert.strContains("type: 11", results, "bson type '11' should be present in the debug output");
+ assert.strContains("type: 12", results, "bson type '12' should be present in the debug output");
+ assert.strContains("type: 13", results, "bson type '13' should be present in the debug output");
+ assert.strContains("type: 17", results, "bson type '17' should be present in the debug output");
+ assert.strContains("type: 18", results, "bson type '18' should be present in the debug output");
+ assert.strContains("type: -1", results, "bson type '-1' should be present in the debug output");
+ assert.strContains("type: 127", results, "bson type '127' should be present in the debug output");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types_json.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types_json.js
new file mode 100644
index 00000000000..6c8e69607dc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/all_types_json.js
@@ -0,0 +1,29 @@
+// This test runs bsondump on a .bson file containing non-deprecated BSON types
+// and makes sure their JSON type representations exist in the output.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--type=json", "jstests/bson/testdata/all_types.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+
+ assert.strContains.soon("20 objects found", rawMongoProgramOutput,
+ "should print out all top-level documents from the test data");
+
+ var results = rawMongoProgramOutput();
+ assert.strContains("$binary", results, "bson type 'binary' should be present in the debug output");
+ assert.strContains("$date", results, "bson type 'date' should be present in the debug output");
+ assert.strContains("$timestamp", results, "bson type 'timestamp' should be present in the debug output");
+ assert.strContains("$regex", results, "bson type 'regex' should be present in the debug output");
+ assert.strContains("$oid", results, "bson type 'oid' should be present in the debug output");
+ assert.strContains("$undefined", results, "bson type 'undefined' should be present in the debug output");
+ assert.strContains("$minKey", results, "bson type 'min' should be present in the debug output");
+ assert.strContains("$maxKey", results, "bson type 'max' should be present in the debug output");
+ assert.strContains("$numberLong", results, "bson type 'long' should be present in the debug output");
+ assert.strContains("$ref", results, "bson type 'dbref' should be present in the debug output");
+ assert.strContains("$id", results, "bson type 'dbref' should be present in the debug output");
+ assert.strContains("$code", results, "bson type 'javascript' should be present in the debug output");
+ assert.strContains("null", results, "bson type 'null' should be present in the debug output");
+ assert.strContains("true", results, "bson type 'true' should be present in the debug output");
+ assert.strContains("false", results, "bson type 'false' should be present in the debug output");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bad_files.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bad_files.js
new file mode 100644
index 00000000000..7d734d0e47a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bad_files.js
@@ -0,0 +1,41 @@
+// This test makes sure that certain invalid BSON succeeds or fails
+// with both JSON and debug output types AND --objcheck
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/random_bytes.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given random bytes");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/bad_cstring.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a non-terminated cstring");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/bad_type.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a bad type value");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/partial_file.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given only the start of a file");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/invalid_field_name.bson");
+ assert.neq(x, 0, "bsondump should exit with an error given invalid field names");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/random_bytes.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given random bytes");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/bad_cstring.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a non-terminated cstring");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/bad_type.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a bad type value");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/partial_file.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given only the start of a file");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/invalid_field_name.bson");
+ assert.neq(x, 0, "bsondump should exit with an error given invalid field names");
+
+ // This should pass, but the content of the output might be erroneous
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/broken_array.bson");
+ assert.eq(x, 0, "bsondump should exit with success when given a bad array document");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/broken_array.bson");
+ assert.eq(x, 0, "bsondump should exit with success when given a bad array document");
+
+ // Make sure recoverable cases do not return an error by default
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "jstests/bson/testdata/bad_cstring.bson");
+ assert.eq(x, 0, "bsondump should not exit with an error when given a non-terminated cstring without --objcheck");
+ assert.strContains.soon("corrupted", rawMongoProgramOutput,
+ "one of the documents should have been labelled as corrupted");
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_broken_pipe.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_broken_pipe.js
new file mode 100644
index 00000000000..e96733320e7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_broken_pipe.js
@@ -0,0 +1,18 @@
+(function() {
+ var bsondumpArgs = ['bsondump', '--type=json', 'jstests/bson/testdata/all_types.bson'];
+ var ddArgs = ['dd', 'count=1000000', 'bs=1024', 'of=/dev/null'];
+ if (_isWindows()) {
+ bsondumpArgs[0] += '.exe';
+ }
+ bsondumpArgs.unshift('set -o pipefail', '&&', 'PATH=.:$PATH');
+
+ var ret = runProgram('bash', '-c', bsondumpArgs.concat('|', ddArgs).join(' '));
+ assert.eq(0, ret, "bash execution should succeed");
+
+ ddArgs = ['dd', 'count=0', 'bs=1', 'of=/dev/null'];
+ ret = runProgram('bash', '-c', bsondumpArgs.concat('|', ddArgs).join(' '));
+ assert.neq(0, ret, "bash execution should fail");
+ assert.soon(function() {
+        // String.search returns -1 (truthy) when not found, so compare explicitly
+        return rawMongoProgramOutput().search(/broken pipe|The pipe is being closed/) !== -1;
+ }, 'should print an error message');
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_options.js
new file mode 100644
index 00000000000..f286eb39b45
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/bsondump_options.js
@@ -0,0 +1,57 @@
+// This test checks reasonable and unreasonable option configurations for bsondump
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var sampleFilepath = "jstests/bson/testdata/sample.bson";
+ var x = _runMongoProgram("bsondump", "--type=fake", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given a non-existent type");
+
+ x = _runMongoProgram("bsondump", "jstests/bson/testdata/asdfasdfasdf");
+ assert.neq(x, 0, "bsondump should exit with failure when given a non-existent file");
+
+ x = _runMongoProgram("bsondump", "--noobjcheck", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given --noobjcheck");
+
+ x = _runMongoProgram("bsondump", "--collection", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given --collection");
+
+ x = _runMongoProgram("bsondump", sampleFilepath, sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given multiple files");
+
+ x = _runMongoProgram("bsondump", '--bsonFile', sampleFilepath, sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given both an out file and a positional argument");
+
+ x = _runMongoProgram("bsondump", "-vvvv", sampleFilepath);
+ assert.eq(x, 0, "bsondump should exit with success when given verbosity");
+ x = _runMongoProgram("bsondump", "--verbose", sampleFilepath);
+ assert.eq(x, 0, "bsondump should exit with success when given verbosity");
+
+ clearRawMongoProgramOutput();
+ var pid = _startMongoProgram("bsondump", "--quiet", sampleFilepath);
+ assert.eq(waitProgram(pid), 0, "bsondump should exit with success when given --quiet");
+ assert.strContains.soon("I am a string", rawMongoProgramOutput,
+ "found docs should still be printed when --quiet is used");
+ assert.eq.soon(-1, function() {
+ return rawMongoProgramOutput()
+ .split("\n")
+ .filter(function(line) {
+ return line.indexOf("sh"+pid+"| ") === 0;
+ })
+ .join("\n")
+ .indexOf("found");
+ }, "only the found docs should be printed when --quiet is used");
+
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "--help");
+ assert.eq(x, 0, "bsondump should exit with success when given --help");
+ assert.strContains.soon("Usage", rawMongoProgramOutput,
+ "help text should be printed when given --help");
+
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "--version");
+ assert.eq(x, 0, "bsondump should exit with success when given --version");
+ assert.strContains.soon("version", rawMongoProgramOutput,
+ "version info should be printed when given --version");
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/deep_nested.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/deep_nested.js
new file mode 100644
index 00000000000..1a226c81fc3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/deep_nested.js
@@ -0,0 +1,8 @@
+// This test checks that bsondump can handle a deeply nested document without breaking
+
+(function() {
+ var x = _runMongoProgram("bsondump", "--type=json", "jstests/bson/testdata/deep_nested.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+ x = _runMongoProgram("bsondump", "--type=debug", "jstests/bson/testdata/deep_nested.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/output_file.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/output_file.js
new file mode 100644
index 00000000000..09d9d399e19
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/bson/output_file.js
@@ -0,0 +1,71 @@
+/**
+ * output_file.js
+ *
+ * This file tests outputting bsondump to a file when the input is from a file.
+ */
+
+(function() {
+ 'use strict';
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('bson_output_file');
+ var commonToolArgs = getCommonToolArguments();
+
+ // The db and collections we'll use.
+ var testDB = toolTest.db.getSiblingDB('test');
+ var destColl = testDB.bsondump;
+
+    // Test using a flag to specify the output file.
+ var exportTarget = 'bson_dump.json';
+ removeFile(exportTarget);
+
+ var ret = _runMongoProgram("bsondump",
+ "--type=json",
+ "--bsonFile", "jstests/bson/testdata/sample.bson",
+ "--outFile", exportTarget);
+ assert.eq(ret, 0, "bsondump should exit successfully with 0");
+
+ // Import the data into the destination collection to check correctness.
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'bsondump',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Make sure everything was dumped.
+ assert.eq(1, destColl.count({a: 1.0}));
+ assert.eq(1, destColl.count({a: 2.5}));
+ assert.eq(1, destColl.count({a: 4.0}));
+ assert.eq(1, destColl.count({a: 4.01}));
+
+
+ // Test using a positional argument to specify the output file.
+ removeFile(exportTarget);
+
+ ret = _runMongoProgram("bsondump",
+ "--type=json",
+ "--outFile", exportTarget,
+ "jstests/bson/testdata/sample.bson");
+ assert.eq(ret, 0, "bsondump should exit successfully with 0");
+
+ // Import the data into the destination collection to check correctness.
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'bsondump',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Make sure everything was dumped.
+ assert.eq(1, destColl.count({a: 1.0}));
+ assert.eq(1, destColl.count({a: 2.5}));
+ assert.eq(1, destColl.count({a: 4.0}));
+ assert.eq(1, destColl.count({a: 4.01}));
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/check_version.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/check_version.js
new file mode 100644
index 00000000000..cf151e41e75
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/check_version.js
@@ -0,0 +1,47 @@
+/**
+ * Given a MongoDB version, parses it into its major/minor/patch components,
+ * discounting '-pre' and '-rcX'. Useful for parsing the output of
+ * `db.version()` into an appropriate form for comparisons.
+ *
+ * Examples:
+ * getVersionComponents('2.7.8'); // { major: 2, minor: 7, patch: 8 }
+ * getVersionComponents('2.8.0-rc0'); // { major: 2, minor: 8, patch: 0 }
+ */
+var getVersionComponents = function(version) {
+ var splitVersion = version.split('.');
+ assert.eq(3, splitVersion.length);
+ var major = parseInt(splitVersion[0], 10);
+ var minor = parseInt(splitVersion[1], 10);
+
+ var patchEnd = splitVersion[2].indexOf('-') !== -1 ?
+ splitVersion[2].indexOf('-') :
+ undefined;
+    var patch = parseInt(splitVersion[2].substr(0, patchEnd), 10);
+ return {
+ major: major,
+ minor: minor,
+ patch: patch,
+ };
+};
+
+/**
+ * Given two versions, returns true if the first version is >= the second.
+ *
+ * Examples:
+ * isAtLeastVersion('2.7.8', '2.7.8'); // true
+ * isAtLeastVersion('2.8.0-rc0', '2.7.8'); // true
+ * isAtLeastVersion('2.6.6', '2.7.8'); // false
+ * isAtLeastVersion('1.8.5', '2.7.8'); // false
+ */
+/* exported isAtLeastVersion */
+var isAtLeastVersion = function(serverVersion, checkVersion) {
+ serverVersion = getVersionComponents(serverVersion);
+ checkVersion = getVersionComponents(checkVersion);
+
+ return (checkVersion.major < serverVersion.major) ||
+ (checkVersion.major === serverVersion.major &&
+ checkVersion.minor < serverVersion.minor) ||
+ (checkVersion.major === serverVersion.major &&
+ checkVersion.minor === serverVersion.minor &&
+ checkVersion.patch <= serverVersion.patch);
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/topology_helper.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/topology_helper.js
new file mode 100644
index 00000000000..7480d6f0772
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/common/topology_helper.js
@@ -0,0 +1,187 @@
+// topology_helper.js; contains utility functions to run tests
+
+// auth related variables
+var authUser = 'user';
+var authPassword = 'password';
+var authArgs = [
+ '--authenticationDatabase', 'admin',
+ '--authenticationMechanism', 'SCRAM-SHA-1',
+ '-u', authUser,
+ '-p', authPassword
+];
+var keyFile = 'jstests/libs/key1';
+
+// topology startup settings
+var auth = {
+ name: 'auth',
+ args: authArgs,
+};
+
+var plain = {
+ name: 'plain',
+ args: [],
+};
+
+/* exported passthroughs */
+// passthroughs while running all tests
+var passthroughs = [plain, auth];
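+// A typical consumer iterates the passthroughs, e.g. (illustrative sketch):
+//   passthroughs.forEach(function(passthrough) {
+//     var topology = standaloneTopology.init(passthrough);
+//     // ... run a tool against topology.connection() ...
+//     topology.stop();
+//   });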
+
+/* helper functions */
+
+// runAuthSetup creates a user with root role on the admin database
+var runAuthSetup = function(topology) {
+ jsTest.log('Running auth setup');
+
+ var conn = topology.connection();
+ var db = conn.getDB('test');
+
+ db.getSiblingDB('admin').createUser({
+ user: authUser,
+ pwd: authPassword,
+ roles: ['root'],
+ });
+
+ assert.eq(db.getSiblingDB('admin').auth(authUser, authPassword), 1, 'authentication failed');
+};
+
+// logoutAdmin logs out the admin user so the topology helpers can auth
+// without causing a multi-auth situation.
+var logoutAdmin = function(topology) {
+ jsTest.log('Logging out admin');
+
+ var conn = topology.connection();
+ var db = conn.getDB('test');
+
+ db.getSiblingDB('admin').logout();
+};
+
+// buildStartupArgs constructs the proper object to be passed as arguments in
+// starting mongod
+var buildStartupArgs = function(passthrough) {
+ var startupArgs = {};
+ if (passthrough.name === auth.name) {
+ startupArgs.auth = '';
+ startupArgs.keyFile = keyFile;
+ }
+ return startupArgs;
+};
+
+// requiresAuth returns a boolean indicating if the passthrough requires authentication
+var requiresAuth = function(passthrough) {
+ return passthrough.name === auth.name;
+};
+
+/* standalone topology */
+/* exported standaloneTopology */
+var standaloneTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using standalone topology');
+
+ passthrough = passthrough || [];
+ var startupArgs = buildStartupArgs(passthrough);
+ startupArgs.port = allocatePorts(1)[0];
+ this.conn = MongoRunner.runMongod(startupArgs);
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ runAuthSetup(this);
+ this.didAuth = true;
+ }
+ return this;
+ },
+ connection: function() {
+ return this.conn;
+ },
+ stop: function() {
+ if (this.didAuth) {
+ logoutAdmin(this);
+ }
+ MongoRunner.stopMongod(this.conn);
+ },
+};
+
+
+/* replica set topology */
+/* exported replicaSetTopology */
+var replicaSetTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using replica set topology');
+
+ passthrough = passthrough || [];
+ var startupArgs = buildStartupArgs(passthrough);
+ startupArgs.name = testName;
+ startupArgs.nodes = 2;
+ this.replTest = new ReplSetTest(startupArgs);
+
+ // start the replica set
+ this.replTest.startSet();
+ jsTest.log('Started replica set');
+
+ // initiate the replica set with a default config
+ this.replTest.initiate();
+ jsTest.log('Initiated replica set');
+
+ // block till the set is fully operational
+ this.replTest.awaitSecondaryNodes();
+ jsTest.log('Replica set fully operational');
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ runAuthSetup(this);
+ this.didAuth = true;
+ }
+ return this;
+ },
+ connection: function() {
+ return this.replTest.getPrimary();
+ },
+ stop: function() {
+ if (this.didAuth) {
+ logoutAdmin(this);
+ }
+ this.replTest.stopSet();
+ },
+};
+
+
+/* sharded cluster topology */
+/* exported shardedClusterTopology */
+var shardedClusterTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using sharded cluster topology');
+
+ passthrough = passthrough || [];
+ var other = buildStartupArgs(passthrough);
+ var startupArgs = {};
+ startupArgs.name = testName;
+ startupArgs.mongos = 1;
+ startupArgs.shards = 1;
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ startupArgs.keyFile = keyFile;
+ startupArgs.other = {
+ shardOptions: other,
+ };
+ this.shardingTest = new ShardingTest(startupArgs);
+ runAuthSetup(this);
+ this.didAuth = true;
+ } else {
+ startupArgs.other = {
+ shardOptions: other,
+ };
+ this.shardingTest = new ShardingTest(startupArgs);
+ }
+ return this;
+ },
+ connection: function() {
+ return this.shardingTest.s;
+ },
+ stop: function() {
+ if (this.didAuth) {
+ logoutAdmin(this);
+ }
+ this.shardingTest.stop();
+ },
+};
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/archive_targets.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/archive_targets.js
new file mode 100644
index 00000000000..9bdf3b4c5b3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/archive_targets.js
@@ -0,0 +1,32 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (!target) {
+ return ["--archive=dump.archive"];
+ }
+ return ["--archive="+target];
+ };
+ }
+}());
+
+var getRestoreTarget;
+
+/* exported dump_targets */
+var dump_targets = "archive";
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (!target) {
+ return ["--archive=dump.archive"];
+ }
+      var targetParts = target.split("/");
+ if (targetParts[0] === "dump") {
+ return ["--archive=dump.archive"];
+ }
+ return ["--archive="+targetParts[0]];
+ };
+ }
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/auth_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/auth_28.config.js
new file mode 100644
index 00000000000..744bb237d79
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/auth_28.config.js
@@ -0,0 +1,38 @@
+/* exported getToolTest */
+var getToolTest;
+var AUTH_USER = 'passwordIsTaco';
+var AUTH_PASSWORD = 'Taco';
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ auth: '',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ var db = toolTest.startDB();
+
+ db.getSiblingDB('admin').createUser({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ roles: ['__system'],
+ });
+
+ db.getSiblingDB('admin').auth(AUTH_USER, AUTH_PASSWORD);
+
+ toolTest.authCommand = "db.getSiblingDB('admin').auth('" + AUTH_USER
+ + "', '" + AUTH_PASSWORD + "');";
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--authenticationDatabase', 'admin'
+ ];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/gzip_targets.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/gzip_targets.js
new file mode 100644
index 00000000000..d57b0c55954
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/gzip_targets.js
@@ -0,0 +1,36 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (target === undefined) {
+ return ["--gzip"];
+ }
+ if (target.indexOf(".bson", target.length - 5) !== -1) {
+ return ["--gzip", "--out="+target+".gz"];
+ }
+ return ["--gzip", "--out="+target];
+ };
+ }
+}());
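+// e.g. getDumpTarget("dump/foo.bson") yields ["--gzip", "--out=dump/foo.bson.gz"],
+// while getDumpTarget("dumpdir") yields ["--gzip", "--out=dumpdir"].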
+
+var dump_targets;
+if (!dump_targets) {
+ dump_targets = "gzip";
+}
+
+var getRestoreTarget;
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (target === undefined) {
+ return ["--gzip"];
+ }
+ if (target.indexOf(".bson", target.length - 5) !== -1) {
+ return ["--gzip", "--dir="+target+".gz"];
+ }
+ return ["--gzip", "--dir="+target];
+ };
+ }
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.config.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.config.yml
new file mode 100644
index 00000000000..a2f7fdc5202
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.config.yml
@@ -0,0 +1,7 @@
+security:
+ authorization: enabled
+ sasl:
+ serviceName: mockservice
+ hostName: kdc.10gen.me
+setParameter:
+ authenticationMechanisms: GSSAPI
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.linux.sh b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.linux.sh
new file mode 100644
index 00000000000..d2f54971f1a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos.linux.sh
@@ -0,0 +1,5 @@
+echo "107.23.89.149 kdc.10gen.me" | sudo tee -a /etc/hosts
+echo "127.0.0.1 testserver.10gen.me" | sudo tee -a /etc/hosts
+sudo hostname "testserver.10gen.me"
+sudo cp jstests/libs/mockkrb5.conf /etc/krb5.conf
+kinit -p mockuser@10GEN.ME -k -t jstests/libs/mockuser.keytab
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28.config.js
new file mode 100644
index 00000000000..c00e8819e9a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28.config.js
@@ -0,0 +1,39 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var AUTH_USER = 'mockuser@10GEN.ME';
+
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ config: 'jstests/configs/kerberos.config.yml',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ var db = toolTest.startDB();
+
+ db.getSiblingDB('$external').createUser({
+ user: AUTH_USER,
+ roles: [{role: '__system', db: 'admin'}],
+ });
+
+ db.getSiblingDB('$external').auth({user: AUTH_USER, mechanism: 'GSSAPI', serviceName: 'mockservice', serviceHostname: 'kdc.10gen.me'});
+
+ toolTest.authCommand = "db.getSiblingDB('$external').auth({ user: '"
+ + AUTH_USER + "', mechanism: 'GSSAPI', serviceName: 'mockservice', serviceHostname: 'kdc.10gen.me' });";
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', 'mockuser@10GEN.ME',
+ '--authenticationDatabase', '$external',
+ '--authenticationMechanism', 'GSSAPI',
+ '--gssapiServiceName', 'mockservice',
+ '--gssapiHostName', 'kdc.10gen.me'
+ ];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js
new file mode 100644
index 00000000000..481862f23f3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js
@@ -0,0 +1,59 @@
+/** NOTE: this config uses a static Kerberos instance running on an EC2
+ * machine outside our security group. It should NOT be used for
+ * automated tests, because it's a single instance and there's no
+ * automated way to generate more instances just yet. */
+
+/** NOTE: you need to add a registry entry for the MADHACKER.BIZ Kerberos
+ * realm before using this:
+ * cmd /c "REG ADD HKLM\SYSTEM\ControlSet001\Control\Lsa\Kerberos\Domains\MADHACKER.BIZ /v KdcNames /d karpov.madhacker.biz /t REG_MULTI_SZ /f"
+ */
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, {});
+ var db;
+
+ db = toolTest.db = new Mongo(AUTH_HOSTNAME + ':27017').getDB('test');
+
+ /** Overwrite so toolTest.runTool doesn't append --host */
+ ToolTest.prototype.runTool = function() {
+ arguments[0] = 'mongo' + arguments[0];
+ return runMongoProgram.apply(null, arguments);
+ };
+
+ db.getSiblingDB('$external').auth({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ mechanism: 'GSSAPI',
+ serviceName: 'mongodb',
+ serviceHostname: AUTH_HOSTNAME,
+ });
+
+ toolTest.authCommand = "db.getSiblingDB('$external').auth({ user: '"
+ + AUTH_USER + "', pwd: '" + AUTH_PASSWORD
+ + "', mechanism: 'GSSAPI', serviceName: 'mongodb', serviceHostname: '"
+ + AUTH_HOSTNAME + "' });";
+
+ toolTest.stop = function() {
+ print('No need to stop on Kerberos windows config. Test succeeded');
+ };
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--host', AUTH_HOSTNAME,
+ '--authenticationDatabase', '$external',
+ '--authenticationMechanism', 'GSSAPI',
+ '--gssapiServiceName', 'mongodb',
+ '--gssapiHostName', AUTH_HOSTNAME
+ ];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_26.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_26.config.js
new file mode 100644
index 00000000000..4f6cd93f8e3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_26.config.js
@@ -0,0 +1,19 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '2.6',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_28.config.js
new file mode 100644
index 00000000000..bf71e5ca079
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/plain_28.config.js
@@ -0,0 +1,21 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_28.config.js
new file mode 100644
index 00000000000..82e9a10891d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_28.config.js
@@ -0,0 +1,39 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var replTest = new ReplSetTest({
+ name: 'tool_replset',
+ nodes: 3,
+ oplogSize: 5,
+ });
+
+ replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ toolTest.m = master;
+ toolTest.db = master.getDB(name);
+ toolTest.port = replTest.getPort(master);
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ replTest.stopSet();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.isReplicaSet = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_auth_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_auth_28.config.js
new file mode 100644
index 00000000000..ed4f0c9b5e5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_auth_28.config.js
@@ -0,0 +1,58 @@
+/* exported getToolTest */
+var getToolTest;
+
+var AUTH_USER = 'passwordIsTaco';
+var AUTH_PASSWORD = 'Taco';
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var replTest = new ReplSetTest({
+ name: 'tool_replset',
+ nodes: 3,
+ oplogSize: 5,
+ auth: '',
+ keyFile: 'jstests/libs/key1',
+ });
+
+    replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ toolTest.m = master;
+ toolTest.db = master.getDB(name);
+ toolTest.port = replTest.getPort(master);
+
+ var db = toolTest.db;
+ db.getSiblingDB('admin').createUser({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ roles: ['__system'],
+ });
+
+ db.getSiblingDB('admin').auth(AUTH_USER, AUTH_PASSWORD);
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ replTest.stopSet();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.authCommand = 'db.getSiblingDB(\'admin\').auth(\'' +
+ AUTH_USER + '\', \'' + AUTH_PASSWORD + '\');';
+
+ toolTest.isReplicaSet = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--authenticationDatabase', 'admin'
+ ];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_single_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_single_28.config.js
new file mode 100644
index 00000000000..18caea4d214
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/replset_single_28.config.js
@@ -0,0 +1,39 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var replTest = new ReplSetTest({
+ name: 'tool_replset',
+ nodes: 1,
+ oplogSize: 5,
+ });
+
+ replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ toolTest.m = master;
+ toolTest.db = master.getDB(name);
+ toolTest.port = replTest.getPort(master);
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ replTest.stopSet();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.isReplicaSet = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/sharding_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/sharding_28.config.js
new file mode 100644
index 00000000000..3a29b394be5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/sharding_28.config.js
@@ -0,0 +1,40 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var shardingTest = new ShardingTest({name: name,
+ shards: 2,
+ verbose: 0,
+ mongos: 3,
+ other: {
+ chunksize: 1,
+ enableBalancer: 0
+ }
+ });
+ shardingTest.adminCommand({enablesharding: name});
+
+ toolTest.m = shardingTest.s0;
+ toolTest.db = shardingTest.getDB(name);
+ toolTest.port = shardingTest.s0.port;
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ shardingTest.stop();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.isSharded = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/ssl_28.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/ssl_28.config.js
new file mode 100644
index 00000000000..17c2fb492a0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/ssl_28.config.js
@@ -0,0 +1,26 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslCAFile: 'jstests/libs/ca.pem',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ toolTest.usesSSL = true;
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/client.pem'
+ ];
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/standard_dump_targets.config.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/standard_dump_targets.config.js
new file mode 100644
index 00000000000..fe68b171246
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/configs/standard_dump_targets.config.js
@@ -0,0 +1,30 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (target === undefined) {
+ return [];
+ }
+ return ["--out="+target];
+ };
+ }
+}());
+
+var dump_targets;
+if (!dump_targets) {
+ dump_targets = "standard";
+}
+
+var getRestoreTarget;
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (target === undefined) {
+ return [];
+ }
+ return ["--dir="+target];
+ };
+ }
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/bad_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/bad_options.js
new file mode 100644
index 00000000000..d06d6bcabdd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/bad_options.js
@@ -0,0 +1,47 @@
+(function() {
+
+ // Tests running mongoexport with bad command line options.
+
+ jsTest.log('Testing running mongoexport with bad command line options');
+
+ var toolTest = new ToolTest('bad_options');
+ toolTest.startDB('foo');
+
+ // run mongoexport with a missing --collection argument
+ var ret = toolTest.runTool('export', '--db', 'test');
+ assert.neq(0, ret);
+
+ // run mongoexport with bad json as the --query
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--query', '{ hello }');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --skip
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{a: 1}', '--skip', 'jamesearljones');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --sort
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{ hello }');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --limit
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{a: 1}', '--limit', 'jamesearljones');
+ assert.neq(0, ret);
+
+ // run mongoexport with --query and --queryFile
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--query', '{a:1}', '--queryFile', 'jstests/export/testdata/query.json');
+ assert.neq(0, ret);
+
+ // run mongoexport with a --queryFile that doesn't exist
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--queryFile', 'jstests/nope');
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/basic_data.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/basic_data.js
new file mode 100644
index 00000000000..2128f2ac174
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/basic_data.js
@@ -0,0 +1,60 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with some basic data, and bringing it back
+ // in with import.
+
+ jsTest.log('Testing exporting, then importing, some basic data');
+
+ var toolTest = getToolTest('basic_data');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'basic_data_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data is correct
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/data_types.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/data_types.js
new file mode 100644
index 00000000000..99a506b2bf1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/data_types.js
@@ -0,0 +1,71 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with different data types, and bringing it back
+ // in with import.
+
+ jsTest.log('Testing exporting, then importing, different data types');
+
+ var toolTest = getToolTest('data_types');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'data_types_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data, of different types
+ testColl.insert({num: 1});
+ testColl.insert({flt: 1.0});
+ testColl.insert({str: '1'});
+ testColl.insert({obj: {a: 1}});
+ testColl.insert({arr: [0, 1]});
+ testColl.insert({bd: new BinData(0, '1234')});
+ testColl.insert({date: ISODate('2009-08-27T12:34:56.789')});
+ testColl.insert({ts: new Timestamp(1234, 5678)});
+ testColl.insert({rx: /foo*"bar"/i});
+ // sanity check the insertion worked
+ assert.eq(9, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data is correct
+ assert.eq(9, testColl.count());
+ assert.eq(1, testColl.count({num: 1}));
+ assert.eq(1, testColl.count({flt: 1.0}));
+ assert.eq(1, testColl.count({str: '1'}));
+ assert.eq(1, testColl.count({obj: {a: 1}}));
+ assert.eq(1, testColl.count({arr: [0, 1]}));
+ assert.eq(1, testColl.count({bd: new BinData(0, '1234')}));
+ assert.eq(1, testColl.count({date: ISODate('2009-08-27T12:34:56.789')}));
+ assert.eq(1, testColl.count({ts: new Timestamp(1234, 5678)}));
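+ // regex equality after a JSON round trip is unreliable, so only check for the field's presence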
+ assert.eq(1, testColl.count({rx: {$exists: true}}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_broken_pipe.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_broken_pipe.js
new file mode 100644
index 00000000000..e1eb41d71cf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_broken_pipe.js
@@ -0,0 +1,49 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('export_broken_pipe');
+ var baseArgs = getCommonToolArguments();
+ baseArgs = baseArgs.concat('--port', toolTest.port);
+
+ if (toolTest.useSSL) {
+ baseArgs = baseArgs.concat([
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslAllowInvalidHostnames']);
+ }
+ var exportArgs = ['mongoexport', '-d', toolTest.db.getName(), '-c', 'foo'].concat(baseArgs);
+ var ddArgs = ['dd', 'count=1000000', 'bs=1024', 'of=/dev/null'];
+ if (_isWindows()) {
+ exportArgs[0] += '.exe';
+ }
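+ // run under 'set -o pipefail' so the pipeline exits with mongoexport's status
+ // (not dd's), and put the working directory on PATH so bash finds the tool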
+ exportArgs.unshift('set -o pipefail', '&&', 'PATH=.:$PATH');
+
+ var testDb = toolTest.db;
+ testDb.dropDatabase();
+ for (var i = 0; i < 500; i++) {
+ testDb.foo.insert({i: i});
+ }
+ assert.eq(500, testDb.foo.count(), 'foo should have our test documents');
+
+ var ret = runProgram('bash', '-c', exportArgs.concat('|', ddArgs).join(' '));
+ assert.eq(0, ret, "bash execution should succeed");
+ assert.strContains.soon('exported 500 records', rawMongoProgramOutput, 'should print the success message');
+
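+ // this time read only 100 bytes before closing the pipe, which should make mongoexport fail with a broken-pipe error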
+ ddArgs = ['dd', 'count=100', 'bs=1', 'of=/dev/null'];
+ ret = runProgram('bash', '-c', exportArgs.concat('|', ddArgs).join(' '));
+ assert.neq(0, ret, "bash execution should fail");
+ assert.soon(function() {
+ return rawMongoProgramOutput().search(/broken pipe|The pipe is being closed/) !== -1;
+ }, 'should print an error message');
+
+ testDb.dropDatabase();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_views.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_views.js
new file mode 100644
index 00000000000..a0ee1d7153e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/export_views.js
@@ -0,0 +1,80 @@
+// @tags: [requires_mongo_34]
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var dbName = 'test';
+ var toolTest = getToolTest('views');
+ var db = toolTest.db.getSiblingDB(dbName);
+ var commonToolArgs = getCommonToolArguments();
+
+ var exportTarget = 'views_export';
+ removeFile(exportTarget);
+
+ function addCitiesData() {
+ db.cities.insertMany([{
+ city: 'Boise',
+ state: 'ID',
+ }, {
+ city: 'Pocatello',
+ state: 'ID',
+ }, {
+ city: 'Nampa',
+ state: 'ID',
+ }, {
+ city: 'Albany',
+ state: 'NY',
+ }, {
+ city: 'New York',
+ state: 'NY',
+ }, {
+ city: 'Los Angeles',
+ state: 'CA',
+ }, {
+ city: 'San Jose',
+ state: 'CA',
+ }, {
+ city: 'Cupertino',
+ state: 'CA',
+ }, {
+ city: 'San Francisco',
+ state: 'CA',
+ }]);
+ }
+
+ function addStateView(state) {
+ db.createCollection('cities'+state, {
+ viewOn: 'cities',
+ pipeline: [{
+ $match: {state: state},
+ }],
+ });
+ }
+
+ addCitiesData();
+ addStateView('ID');
+ addStateView('NY');
+ addStateView('CA');
+
+ assert.eq(9, db.cities.count(), 'should have 9 cities');
+ assert.eq(3, db.citiesID.count(), 'should have 3 cities in Idaho view');
+ assert.eq(2, db.citiesNY.count(), 'should have 2 cities in New York view');
+ assert.eq(4, db.citiesCA.count(), 'should have 4 cities in California view');
+
+ var ret;
+
+ ret = toolTest.runTool.apply(toolTest, ['export', '-o', exportTarget, '-d', dbName, '-c', 'citiesCA']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'export should succeed');
+
+ db.dropDatabase();
+
+ ret = toolTest.runTool.apply(toolTest, ['import', exportTarget, '-d', dbName, '-c', 'CACities']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'import should succeed');
+
+ assert.eq(4, db.CACities.count(), 'restored view should have correct number of documents');
+
+ removeFile(exportTarget);
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/field_file.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/field_file.js
new file mode 100644
index 00000000000..e6139498266
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/field_file.js
@@ -0,0 +1,60 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to csv using the --fieldFile option
+ jsTest.log('Testing exporting to csv using the --fieldFile option');
+
+ var toolTest = getToolTest('field_file');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'jstests/export/testdata/field_file_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, using a field file that specifies 'a' and 'b'
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--type=csv',
+ '--fieldFile', 'jstests/export/testdata/simple_field_file']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type=csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+
+ // make sure only the specified fields were exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_csv.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_csv.js
new file mode 100644
index 00000000000..ecd2294eb1b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_csv.js
@@ -0,0 +1,173 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to csv using the --fields option.
+
+ jsTest.log('Testing exporting to csv using the --fields option');
+
+ var toolTest = getToolTest('fields_csv');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'fields_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, specifying only one field
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure only the specified field was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(0, destColl.count({b: 1}));
+ assert.eq(0, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, specifying all fields
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure everything was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(1, destColl.count({c: 3}));
+
+ // make sure the _id was NOT exported - the _id for the
+ // corresponding documents in the two collections should
+ // be different
+ var fromSource = sourceColl.findOne({a: 1, b: 1});
+ var fromDest = destColl.findOne({a: 1, b: 1});
+ assert.neq(fromSource._id, fromDest._id);
+
+
+ /* Test passing positional arguments to --fields */
+
+ // outputMatchesExpected takes an output string and returns
+ // a boolean indicating if any line of the output matched
+ // the expected string.
+ var outputMatchesExpected = function(output, expected) {
+ var found = false;
+ output.split('\n').forEach(function(line) {
+ if (line.match(expected)) {
+ found = true;
+ }
+ });
+ return found;
+ };
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ sourceColl.remove({});
+
+ // ensure source collection is empty
+ assert.eq(0, sourceColl.count());
+
+ // insert some data
+ sourceColl.insert({a: [1, 2, 3, 4, 5], b: {c: [-1, -2, -3, -4]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: {e: [4, 5, 6]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: 5, e: {"0": ["foo", "bar", "baz"]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: [4, 5, 6], e: [{"0": 0, "1": 1}, {"2": 2, "3": 3}]});
+
+ // ensure the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // use the following fields as filters:
+ var cases = [
+ {field: 'd.e.2', expected: /6/}, // specify nested field with array value
+ {field: 'e.0.0', expected: /foo/}, // specify nested field with numeric array value
+ {field: 'b,d.1,e.1.3', expected: /2,5,3/}, // specify varying levels of field nesting
+ ];
+
+ var output;
+
+ for (var i = 0; i < cases.length; i++) {
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--fields', cases[i].field,
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ output = cat(exportTarget);
+ jsTest.log("Fields Test " + (i + 1) + ": \n" + output);
+ assert.eq(outputMatchesExpected(output, cases[i].expected), true);
+ }
+
+ // test with $ projection and query
+ cases = [
+ {query: '{ d: 4 }', field: 'd.$', expected: /[4]/},
+ {query: '{ a: { $gt: 1 } }', field: 'a.$', expected: /[2]/},
+ {query: '{ "b.c": -1 }', field: 'b.c.$', expected: /[-1]/},
+ ];
+
+ for (i = 0; i < cases.length; i++) {
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--query', cases[i].query,
+ '--fields', cases[i].field,
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ output = cat(exportTarget);
+ jsTest.log("Fields + Query Test " + (i + 1) + ": \n" + output);
+ assert.eq(outputMatchesExpected(output, cases[i].expected), true);
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_json.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_json.js
new file mode 100644
index 00000000000..78c94be4fe0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/fields_json.js
@@ -0,0 +1,92 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to json with the --fields option
+
+ jsTest.log('Testing exporting to json using the --fields option');
+
+ var toolTest = getToolTest('fields_json');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'fields_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, specifying only one field
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure only the specified field was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(0, destColl.count({b: 1}));
+ assert.eq(0, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, specifying all fields
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure everything was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(1, destColl.count({c: 3}));
+
+ // make sure the _id was exported - the _id for the
+ // corresponding documents in the two collections should
+ // be the same
+ var fromSource = sourceColl.findOne({a: 1, b: 1});
+ var fromDest = destColl.findOne({a: 1, b: 1});
+ assert.eq(fromSource._id, fromDest._id);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/force_table_scan.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/force_table_scan.js
new file mode 100644
index 00000000000..8ae4f0af8e2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/force_table_scan.js
@@ -0,0 +1,128 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --forceTableScan specified.
+
+ jsTest.log('Testing exporting with --forceTableScan');
+
+ var toolTest = getToolTest('force_table_scan');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'force_table_scan_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // set the profiling level to high, so that
+ // we can inspect all queries
+ assert.eq(1, testDB.setProfilingLevel(2).ok);
+
+ // the profiling collection
+ var profilingColl = testDB.system.profile;
+
+ // run mongoexport without --forceTableScan
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the query from the profiling collection
+ var queries = profilingColl.find({op: 'query', ns: 'test.data'}).toArray();
+
+ // there should only be one query so far, and it should have snapshot set
+ assert.eq(1, queries.length);
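+ // the profiler schema differs across server versions: older servers record
+ // the find under 'query', newer ones under 'command'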
+ if (queries[0].command === undefined) {
+ assert.eq(true, queries[0].query.$snapshot || queries[0].query.snapshot);
+ } else {
+ assert.eq(true, queries[0].command.snapshot);
+ }
+
+ // remove the export file
+ removeFile(exportTarget);
+
+ // run mongoexport again, with --forceTableScan
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--forceTableScan']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the queries again
+ queries = profilingColl.find({op: 'query', ns: 'test.data'}).sort({ts: 1}).toArray();
+
+ // there should be two queries, and the second one should not
+ // have snapshot set
+ assert.eq(2, queries.length);
+ if (queries[1].command === undefined) {
+ assert(!queries[1].query['$snapshot']);
+ } else {
+ assert.eq(true, !queries[1].command.snapshot);
+ }
+
+ // wipe the collection
+ testColl.remove({});
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the export with --forceTableScan exported the correct data
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // remove the export file
+ removeFile(exportTarget);
+
+ // run mongoexport again, without --forceTableScan but with --sort. --forceTableScan
+ // should be implicitly set
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{_id:1}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the queries again
+ queries = profilingColl.find({op: 'query', ns: 'test.data'}).sort({ts: 1}).toArray();
+
+ // there should be 3 queries, and the last one should not have snapshot set
+ assert.eq(3, queries.length);
+ if (queries[2].command === undefined) {
+ assert(!queries[2].query['$snapshot']);
+ } else {
+ assert.eq(true, !queries[2].command.snapshot);
+ }
+
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/json_array.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/json_array.js
new file mode 100644
index 00000000000..4c929d5d718
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/json_array.js
@@ -0,0 +1,57 @@
+(function() {
+
+ // Tests running mongoexport with the --jsonArray output option.
+
+ jsTest.log('Testing exporting with --jsonArray specified');
+
+ var toolTest = new ToolTest('json_array');
+ toolTest.startDB('foo');
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // the export target
+ var exportTarget = 'json_array_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 20; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(20, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool('export', '--out', exportTarget,
+ '--db', 'test', '--collection', 'data', '--jsonArray');
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // make sure that mongoimport without --jsonArray does not work
+ ret = toolTest.runTool('import', '--file', exportTarget,
+ '--db', 'test', '--collection', 'data');
+ assert.neq(0, ret);
+
+ // make sure nothing was imported
+ assert.eq(0, testColl.count());
+
+ // run mongoimport again, with --jsonArray
+ ret = toolTest.runTool('import', '--file', exportTarget,
+ '--db', 'test', '--collection', 'data', '--jsonArray');
+ assert.eq(0, ret);
+
+ // make sure the data was imported
+ assert.eq(20, testColl.count());
+ for (i = 0; i < 20; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/limit.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/limit.js
new file mode 100644
index 00000000000..114a4e3c98a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/limit.js
@@ -0,0 +1,61 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --limit specified.
+
+ jsTest.log('Testing exporting with --limit');
+
+ var toolTest = getToolTest('limit');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'limit_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({a: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data, using --limit
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{a:1}',
+ '--limit', '20']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the limit was applied to the export
+ assert.eq(20, testColl.count());
+ for (i = 0; i < 20; i++) {
+ assert.eq(1, testColl.count({a: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/namespace_validation.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/namespace_validation.js
new file mode 100644
index 00000000000..410b8986223
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/namespace_validation.js
@@ -0,0 +1,25 @@
+(function() {
+
+ // Tests running mongoexport with valid and invalid namespaces.
+
+ jsTest.log('Testing exporting valid or invalid namespaces');
+
+ var toolTest = new ToolTest('system_collection');
+ toolTest.startDB('foo');
+
+ // run mongoexport with a dot in the db name
+ var ret = toolTest.runTool('export', '--db', 'test.bar', '--collection', 'foo');
+ assert.neq(0, ret);
+
+ // run mongoexport with a quote character in the db name
+ ret = toolTest.runTool('export', '--db', 'test"bar', '--collection', 'foo');
+ assert.neq(0, ret);
+
+ // run mongoexport with a system collection
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'system.foobar');
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/nested_fields_csv.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/nested_fields_csv.js
new file mode 100644
index 00000000000..3668449c466
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/nested_fields_csv.js
@@ -0,0 +1,65 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests exporting nested fields to csv.
+
+ jsTest.log('Testing exporting nested fields to csv');
+
+ var toolTest = getToolTest('nested_fields_csv');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'nested_fields_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 2, b: {c: 2}});
+ sourceColl.insert({a: 3, b: {c: 3, d: {e: 3}}});
+ sourceColl.insert({a: 4, x: null});
+ // sanity check the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // export the data, specifying nested fields to export
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a,b.d.e,x.y']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--headerline']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the non-specified fields were ignored, and the
+ // specified fields were added correctly
+ assert.eq(0, destColl.count({'b.c': 2}));
+ assert.eq(0, destColl.count({'b.c': 3}));
+ assert.eq(1, destColl.count({'b.d.e': 3}));
+ assert.eq(3, destColl.count({'b.d.e': ''}));
+ assert.eq(1, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({a: 2}));
+ assert.eq(1, destColl.count({a: 3}));
+ assert.eq(4, destColl.count({'x.y': ''}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/no_data.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/no_data.js
new file mode 100644
index 00000000000..cfc9248bb5f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/no_data.js
@@ -0,0 +1,21 @@
+(function() {
+
+ // Tests running mongoexport with no data in the target collection.
+
+ jsTest.log('Testing exporting no data');
+
+ var toolTest = new ToolTest('no_data');
+ toolTest.startDB('foo');
+
+ // run mongoexport with no data, make sure it doesn't error out
+ var ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data');
+ assert.eq(0, ret);
+
+ // but it should fail if --assertExists is specified
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data', '--assertExists');
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/pretty.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/pretty.js
new file mode 100644
index 00000000000..fbaad9582ce
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/pretty.js
@@ -0,0 +1,35 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('pretty');
+ var commonToolArgs = getCommonToolArguments();
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+
+ // export it with pretty
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', "pretty.json",
+ '--db', 'test',
+ '--collection', 'source',
+ '--pretty',
+ '--jsonArray']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var parsed = JSON.parse(cat('pretty.json'));
+ assert.eq(parsed[0].a, 1);
+ assert.eq(parsed[1].b, 1);
+ assert.eq(parsed[2].b, 2);
+ assert.eq(parsed[2].c, 3);
+
+ // success
+ toolTest.stop();
+}());
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/query.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/query.js
new file mode 100644
index 00000000000..b1884ee4de3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/query.js
@@ -0,0 +1,200 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --query specified.
+
+ jsTest.log('Testing exporting with --query');
+
+ var toolTest = getToolTest('query');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'query_export.json';
+ removeFile(exportTarget);
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // insert some data
+ sourceColl.insert({a: 1, x: {b: '1'}});
+ sourceColl.insert({a: 2, x: {b: '1', c: '2'}});
+ sourceColl.insert({a: 1, c: '1'});
+ sourceColl.insert({a: 2, c: '2'});
+ // sanity check the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // export the data, with a query that will match nothing
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{a:3}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the export was blank
+ assert.eq(0, destColl.count());
+
+ // remove the export
+ removeFile(exportTarget);
+
+ // export the data, with a query matching a single element
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{a:1, c:'1'}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 1, c: '1'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // TOOLS-716 export the data, with a queryFile matching a single element
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--queryFile', "jstests/export/testdata/query.json"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 1, c: '1'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+
+ // export the data, with a query on an embedded document
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{a:2, 'x.c':'2'}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 2, "x.c": '2'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, with a blank query (should match everything)
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(4, destColl.count());
+
+ // TOOLS-469 test queries containing extended JSON field (like dates)
+ sourceColl.drop();
+ destColl.drop();
+ sourceColl.insert({
+ a: 1,
+ x: ISODate("2014-12-11T13:52:39.498Z"),
+ y: ISODate("2014-12-13T13:52:39.498Z")
+ });
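+ // the query exercises both the Date(<millis>) shell form and the {$date: <millis>} extended JSON form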
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{x:{$gt:Date(1418305949498), $lt:Date(1418305979498)}, y:{$gt:{$date:1418478749498}, $lt:{$date:1418478769498}}}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(1, destColl.count());
+
+ // TOOLS-530 add support for ISODate and string formatting for query flag
+ sourceColl.drop();
+ destColl.drop();
+ sourceColl.insert({
+ a: 1,
+ x: ISODate("2014-12-11T13:52:39.498Z"),
+ y: ISODate("2014-12-13T13:52:39.498Z")
+ });
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{x:{$gt:ISODate("2014-12-11T13:52:39.3Z"), $lt:ISODate("2014-12-11T13:52:39.5Z")}}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(1, destColl.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/slave_ok.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/slave_ok.js
new file mode 100644
index 00000000000..08936eda33e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/slave_ok.js
@@ -0,0 +1,63 @@
+(function() {
+ // Tests running mongoexport with --slaveOk.
+
+ jsTest.log('Testing exporting with --slaveOk');
+
+ // bring up a replica set with 3 nodes
+ var replTest = new ReplSetTest({
+ name: 'slave_ok',
+ nodes: 3,
+ oplogSize: 5,
+ useHostName: true,
+ });
+ var nodes = replTest.startSet();
+ replTest.initiate();
+ replTest.awaitSecondaryNodes();
+
+ // cache the primary
+ var primary = replTest.getPrimary();
+
+ // the export target
+ var exportTarget = 'slave_ok_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ var testDB = primary.getDB('test');
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+ replTest.awaitReplication();
+
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // make sure that exporting from any of the nodes works with --slaveOk
+ nodes.forEach(function(node) {
+ // remove the export, clean the destination collection
+ removeFile(exportTarget);
+ testDB.dest.remove({});
+ printjson(replTest.status());
+
+ var ret = runMongoProgram('mongoexport',
+ '--db', 'test',
+ '--collection', 'data',
+ '--host', node.host,
+ '--slaveOk',
+ '--out', exportTarget);
+ assert.eq(0, ret);
+
+ ret = runMongoProgram('mongoimport',
+ '--db', 'test',
+ '--collection', 'dest',
+ '--host', primary.host,
+ '--file', exportTarget);
+ assert.eq(0, ret);
+ assert.eq(10, testDB.dest.count());
+ });
+
+ // success
+ replTest.stopSet();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/sort_and_skip.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/sort_and_skip.js
new file mode 100644
index 00000000000..5dade52786b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/sort_and_skip.js
@@ -0,0 +1,69 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --sort and --skip specified.
+
+ jsTest.log('Testing exporting with --sort and --skip');
+
+ var toolTest = getToolTest('sort_and_skip');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'sort_and_skip_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data, in a different order than we'll be sorting it
+ var data = [];
+ for (var i = 30; i > 20; i--) {
+ data.push({a: i});
+ }
+ for (i = 31; i < 50; i++) {
+ data.push({a: i});
+ }
+ for (i = 20; i >= 0; i--) {
+ data.push({a: i});
+ }
+ testColl.insertMany(data, {ordered: true});
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data, using --skip
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{a:1}',
+ '--skip', '20']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the skip was applied to the export, and that
+ // the sort functioned so that the correct documents
+ // were skipped
+ assert.eq(30, testColl.count());
+ for (i = 20; i < 50; i++) {
+ assert.eq(1, testColl.count({a: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/stdout.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/stdout.js
new file mode 100644
index 00000000000..4c6b502f549
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/stdout.js
@@ -0,0 +1,43 @@
+// Tests running mongoexport writing to stdout.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing exporting to stdout');
+
+ var toolTest = new ToolTest('stdout');
+ toolTest.startDB('foo');
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 20; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(20, testColl.count());
+
+ // export the data, writing to stdout
+ var ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data');
+ assert.eq(0, ret);
+
+ // wait for full output to appear
+ assert.strContains.soon('exported 20 records', rawMongoProgramOutput,
+ 'should show number of exported records');
+
+ // grab the raw output
+ var output = rawMongoProgramOutput();
+
+ // make sure it contains the json output
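+ // (shell-inserted numbers are doubles, so each _id is exported as e.g. 0.0)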
+ for (i = 0; i < 20; i++) {
+ assert.neq(-1, output.indexOf('{"_id":'+i+'.0}'));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/testdata/simple_field_file b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/testdata/simple_field_file
new file mode 100644
index 00000000000..422c2b7ab3b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/testdata/simple_field_file
@@ -0,0 +1,2 @@
+a
+b
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/type_case.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/type_case.js
new file mode 100644
index 00000000000..8d6141f4094
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/export/type_case.js
@@ -0,0 +1,116 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Testing exporting with various type specifiers
+
+ jsTest.log('Testing exporting with various type specifiers');
+
+ var toolTest = getToolTest('export_types');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+
+ // the export target
+ var exportTarget = 'type_export';
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // first validate that invalid types are rejected
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="foobar"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
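+ // exit code 3 is assumed to be the tools' bad-options exit status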
+ assert.eq(3, ret);
+
+ // create a dump file using a lowercase csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".csv",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="csv"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var csvmd5 = md5sumFile(exportTarget + ".csv");
+
+ // create a dump file using an uppercase csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".CSV",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="CSV"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ var CSVmd5 = md5sumFile(exportTarget + ".CSV");
+ // the files for the uppercase and lowercase types should match
+ assert.eq(csvmd5, CSVmd5);
+
+ // create a dump file using a mixed-case csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".cSv",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="cSv"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ var cSvmd5 = md5sumFile(exportTarget + ".cSv");
+ // the files for the mixed-case and lowercase types should match
+ assert.eq(csvmd5, cSvmd5);
+
+ // then some json type tests
+
+ // create a dump file using a lowercase json type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".json",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="json"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var jsonmd5 = md5sumFile(exportTarget + ".json");
+
+ // create a dump file using an uppercase json type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".JSON",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="JSON"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var JSONmd5 = md5sumFile(exportTarget + ".JSON");
+
+ // create a dump file using a blank type, which defaults to json
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".blank",
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var blankmd5 = md5sumFile(exportTarget + ".blank");
+ assert.eq(JSONmd5, jsonmd5);
+ assert.eq(blankmd5, jsonmd5);
+
+ // sanity check
+ assert.neq(csvmd5, jsonmd5);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_db.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_db.js
new file mode 100644
index 00000000000..33612fdf6a9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_db.js
@@ -0,0 +1,61 @@
+// mongofiles_db.js; ensure that running mongofiles using the db flag works as
+// expected
+var testName = 'mongofiles_db';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --db option');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with the --db option with ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('otherdb');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--db', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--db', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the files were inserted into the right db
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 1');
+
+ // test short form
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '-d', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 3 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '-d', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 4 failed');
+
+ // ensure the file was inserted into the right db
+ assert.eq(4, db.getCollection('fs.files').count(), 'unexpected fs.files count 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_delete.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_delete.js
new file mode 100644
index 00000000000..74b1b8cd295
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_delete.js
@@ -0,0 +1,47 @@
+// mongofiles_delete.js; ensure that delete command works as expected
+var testName = 'mongofiles_delete';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles delete command');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // ensure tool runs without error
+ for (var i = 0; i < 10; i++) {
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed');
+ }
+
+ // ensure all the files were written
+ assert.eq(10, db.fs.files.count(), 'unexpected fs.files count');
+
+ jsTest.log('Deleting file');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'delete', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'delete failed');
+
+ // ensure all the files were deleted
+ assert.eq(0, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(0, db.fs.chunks.count(), 'unexpected fs.chunks count');
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_get.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_get.js
new file mode 100644
index 00000000000..621669a0ac0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_get.js
@@ -0,0 +1,83 @@
+// mongofiles_get.js; ensure that get command works as expected
+var testName = 'mongofiles_get';
+(function() {
+ jsTest.log('Testing mongofiles get command');
+ load('jstests/files/util/mongofiles_common.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+ var getFile = testName + (Math.random() + 1).toString(36).substring(7);
+
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the file was inserted
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count 1');
+ var fileId = db.fs.files.findOne()._id;
+
+ jsTest.log('Getting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'get failed');
+
+ // ensure the retrieved file is exactly the same as that inserted
+ var expected = md5sumFile(filesToInsert[0]);
+ var actual = md5sumFile(getFile);
+
+ assert.eq(actual, expected, 'mismatched md5 sum - expected ' + expected + ' got ' + actual);
+
+ // ensure tool runs get_id without error
+ var idAsJSON = fileId.tojson();
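+ // the Windows command line strips unescaped double quotes, so escape them before passing the _id as an argument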
+ if (_isWindows()) {
+ idAsJSON = '"' + idAsJSON.replace(/"/g, '\\"') + '"';
+ }
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get_id', idAsJSON]
+ .concat(passthrough.args)),
+ 0, 'get_id failed');
+ actual = md5sumFile(getFile);
+ assert.eq(actual, expected, 'mismatched md5 sum on _id - expected ' + expected + ' got ' + actual);
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // test getting to stdout
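+ // passing '-' as --local streams the retrieved file to stdout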
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '-',
+ 'get', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'get stdout failed');
+ var expectedContent = "this is a text file";
+ assert.strContains.soon(expectedContent, rawMongoProgramOutput,
+ "stdout get didn't match expected file content");
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_host.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_host.js
new file mode 100644
index 00000000000..0fa69d1dc0c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_host.js
@@ -0,0 +1,59 @@
+// mongofiles_host.js; ensure that running mongofiles using valid and invalid
+// host names or IP addresses succeeds/fails as expected
+var testName = 'mongofiles_host';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --host option');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with valid host name with ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', '127.0.0.1',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the file was inserted
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 1');
+
+ jsTest.log('Putting file with invalid host name with ' + passthrough.name + ' passthrough');
+
+ // ensure tool exits with a non-zero exit code when supplied invalid hosts
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', 'does-not-exist',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 1');
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', '555.555.555.555',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 2');
+
+ // ensure the file was not inserted
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_invalid.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_invalid.js
new file mode 100644
index 00000000000..03128feb201
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_invalid.js
@@ -0,0 +1,37 @@
+// mongofiles_invalid.js; runs mongofiles with an invalid command and
+// option - ensures it fails in all cases
+var testName = 'mongofiles_invalid';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles with invalid commands and options');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ jsTest.log('Running mongofiles with invalid options on passthrough ' + passthrough.name);
+
+ // run with invalid option
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--invalid', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'invalid-option: mongofiles succeeded when it should have failed');
+
+ // run with invalid command
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'invalid', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'invalid-command: mongofiles succeeded when it should have failed');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_list.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_list.js
new file mode 100644
index 00000000000..db3cb16f323
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_list.js
@@ -0,0 +1,96 @@
+// mongofiles_list.js; tests the mongofiles list option by doing the following:
+//
+// 1. Inserts the mongod/mongo binaries using mongofiles put
+// 2. Checks that the actual md5 of the file matches what's stored in the database
+// 3. Runs the mongofiles list command to view all files stored.
+// 4. Ensures that all the files inserted are returned.
+// 5. Ensures that the returned list matches the actual names and sizes of the
+// files inserted.
+var testName = 'mongofiles_list';
+(function() {
+ jsTest.log('Testing mongofiles list command');
+ load('jstests/libs/extended_assert.js');
+ load('jstests/files/util/mongofiles_common.js');
+ var assert = extendedAssert;
+
+ var putFile = function(passthrough, conn, file) {
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', file]
+ .concat(passthrough.args)),
+ 0, 'put for ' + file + ' failed');
+ var db = conn.getDB('test');
+ var fileObj = db.fs.files.findOne({
+ filename: file,
+ });
+ assert(fileObj, 'could not find put file ' + file);
+ assert.eq(md5sumFile(file), fileObj.md5, file + ' md5 did not match - expected ' + md5sumFile(file) + ' got ' + fileObj.md5);
+ return fileObj.length;
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting GridFS files with ' + passthrough.name + ' passthrough');
+
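+ // mongofiles list output is captured as shell lines of the form
+ // "sh<pid>| <filename> <size>"; inputFileRegex picks out lines naming the
+ // testdata files and whitespaceSplitRegex splits a line into its fields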
+ var inputFileRegex = /^sh.*files.*/;
+ var whitespaceSplitRegex = /,?\s+/;
+ var fileSizes = [];
+
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ filesToInsert.forEach(function(file) {
+ var fileSize = putFile(passthrough, conn, file);
+ fileSizes.push(fileSize);
+ });
+
+ jsTest.log('Running mongofiles list');
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // ensure tool runs without error
+ var pid = startMongoProgramNoConnect.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--quiet', 'list']
+ .concat(passthrough.args));
+ assert.eq(waitProgram(pid), 0, 'list command failed but was expected to succeed');
+
+ jsTest.log('Verifying list output');
+
+ var files;
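+ // poll the raw shell output until lines from this mongofiles process
+ // (prefixed with sh<pid>) that name the inserted files show up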
+ assert.neq.soon(0, function() {
+ files = rawMongoProgramOutput()
+ .split('\n')
+ .filter(function(line) {
+ return line.indexOf('sh'+pid) !== -1 && line.match(inputFileRegex);
+ });
+ return files.length;
+ }, 'should find some files');
+
+ // ensure that the returned files and their sizes are as expected
+ files.forEach(function(currentFile, index) {
+ // should print mongod and then mongo
+ var fileEntry = currentFile.split(whitespaceSplitRegex);
+
+ // the list command prints 2 fields - the file name and its size; we check
+ // for 3 fields because of the sh<pid> prefix our js test framework adds to
+ // each output line
+ assert.eq(fileEntry.length, 3, 'unexpected list output on ' + currentFile + ' - expected 3 but got ' + fileEntry.length);
+
+ // ensure the expected file name is what is printed
+ assert.eq(fileEntry[1], filesToInsert[index], 'expected file ' + filesToInsert[index] + ' got ' + fileEntry[1]);
+
+ // ensure the expected file size is what is printed
+ assert.eq(fileEntry[2], fileSizes[index], 'expected size ' + fileSizes[index] + ' got ' + fileEntry[2]);
+ });
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_local.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_local.js
new file mode 100644
index 00000000000..c4f2ccade15
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_local.js
@@ -0,0 +1,102 @@
+// mongofiles_local.js; ensure that when --local is passed:
+// a. for puts, the supplied argument is read and stored using the gridfs filename
+// b. for gets, the supplied argument is used to store the retrieved file
+// c. for puts, if the supplied argument is the empty string, an error should occur
+// d. for gets, if the supplied argument is the empty string, the file name is used
+var testName = 'mongofiles_local';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --local option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // generate a random GridFS name for the file
+ var putFSName = testName + (Math.random() + 1).toString(36).substring(7);
+ var getFSName = testName + (Math.random() + 1).toString(36).substring(7);
+
+ jsTest.log('Running put on file with --local');
+
+ // ensure tool runs without error with a non-empty --local argument
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '-l', filesToInsert[0],
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+
+ // ensure the file exists
+ assert(db.fs.files.findOne({
+ filename: putFSName
+ }), 'did not find expected GridFS file - ' + putFSName);
+
+ // ensure tool returns an error if the --local argument does not exist
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', filesToInsert[0] + '?',
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed 2');
+
+ // if the argument is empty, mongofiles falls back to the GridFS name
+ // (putFSName), which does not exist locally, so the put should fail
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed 3');
+
+ // if the argument is empty and the GridFS file exists, the put runs
+ // without error on linux and fails on windows
+ var comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 2');
+
+ jsTest.log('Running get on file with --local');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFSName,
+ 'get', putFSName]
+ .concat(passthrough.args)),
+ 0, 'get failed when it should have succeeded 1');
+
+ // ensure the right file name was written
+ assert.eq(md5sumFile(filesToInsert[0]), md5sumFile(getFSName), 'files do not match!');
+
+ // ensure the tool falls back to the GridFS name when the --local argument
+ // is empty - this works on linux and fails on windows
+ comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'get', putFSName]
+ .concat(passthrough.args)),
+ 0, 'get failed unexpectedly');
+
+ if (!_isWindows()) {
+ assert.eq(md5sumFile(filesToInsert[0]), md5sumFile(putFSName), 'md5sums do not match - expected ' + md5sumFile(filesToInsert[0]) + ' got ' + md5sumFile(putFSName));
+ }
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_port.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_port.js
new file mode 100644
index 00000000000..1f1e4e8e219
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_port.js
@@ -0,0 +1,52 @@
+// mongofiles_port.js; ensure that supplying valid/invalid port addresses
+// succeeds/fails as expected
+var testName = 'mongofiles_port';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --port option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Putting file with valid port with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the file was inserted
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count 1');
+
+ jsTest.log('Putting file with invalid port with ' + passthrough.name + ' passthrough');
+
+ // ensure tool exits with a non-zero exit code when supplied invalid ports
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', '12345',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 1');
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', 'random',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 2');
+
+ // ensure the file was not inserted
+ var count = db.fs.files.count();
+ assert.eq(1, count, 'unexpected fs.files count - expected 1 but got ' + count);
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_prefix.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_prefix.js
new file mode 100644
index 00000000000..2e2195c778d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_prefix.js
@@ -0,0 +1,49 @@
+// mongofiles_prefix.js; ensure that passing --prefix works as expected - the
+// provided prefix is used as the collection name prefix
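+// GridFS stores data in the collections <prefix>.files and <prefix>.chunks,
+// where the default prefix is "fs"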
+var testName = 'mongofiles_prefix';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --prefix option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Putting file without --prefix with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the default collection name prefix was used
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(0, db[testName + '.files'].count(), 'unexpected ' + testName + '.files count');
+
+ jsTest.log('Putting file with --prefix with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--prefix', testName,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the supplied collection name prefix was used
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(1, db[testName + '.files'].count(), 'unexpected ' + testName + '.files count');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_put.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_put.js
new file mode 100644
index 00000000000..19c1ebe3fb1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_put.js
@@ -0,0 +1,127 @@
+// mongofiles_put.js; ensure that put works with very large files.
+// NOTE: this test uses mongodump to create a large file
+var testName = 'mongofiles_put';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles put command');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // create a large collection and dump it
+ jsTest.log('Creating large collection with ' + passthrough.name + ' passthrough');
+
+ var insertString = new Array(100).join("mongoDB");
+ var inserted = 0;
+ var num = 0;
+ var dbName = 'test';
+ var collection = 'foo';
+ var bulk = db[collection].initializeUnorderedBulkOp();
+
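+ // insertString is 693 bytes (99 copies of "mongoDB"), so roughly 60k inserts
+ // are needed to cross the 40 MB threshold below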
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({
+ _id: num++,
+ str: insertString
+ });
+ inserted += insertString.length;
+ }
+
+ assert.writeOK(bulk.execute({w: "majority"}));
+
+ // dumping large collection to single large file
+ jsTest.log('Dumping collection to filesystem with ' + passthrough.name + ' passthrough');
+
+ var dumpDir = './dumpDir';
+
+ assert.eq(runMongoProgram.apply(this, ['mongodump',
+ '-d', dbName,
+ '--port', conn.port,
+ '-c', collection,
+ '--out', dumpDir]
+ .concat(passthrough.args)),
+ 0, 'dump failed when it should have succeeded');
+
+ jsTest.log('Putting directory');
+
+ // putting a directory should fail
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', dumpDir]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed');
+
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ var putFile = dumpDir + '/' + dbName + '/' + collection + '.bson';
+
+ // ensure putting of the large file succeeds
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', putFile,
+ 'put', testName]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded');
+
+ // verify file metadata
+ var fileObj = db.fs.files.findOne({
+ filename: testName
+ });
+ assert(fileObj, testName + ' was not found');
+
+ var numDbChunks = db.fs.chunks.count();
+
+ // the number of chunks should equal Math.ceil(fileSize / chunkSize), where
+ // fileSize is fs.files.length in bytes and the default chunk size is 255 KB
+ var expectedNumChunks = Math.ceil(fileObj.length / (1024 * 255));
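+ // for illustration: a file of exactly 40 MB would need
+ // Math.ceil(41943040 / 261120) = 161 chunks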
+
+ assert.eq(expectedNumChunks, numDbChunks, 'expected ' + expectedNumChunks + ' chunks; got ' + numDbChunks);
+
+ // now attempt to get the large file
+ jsTest.log('Getting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ var getFile = testName + (Math.random() + 1).toString(36).substring(7);
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get', testName]
+ .concat(passthrough.args)),
+ 0, 'get failed');
+
+ // ensure the retrieved file is exactly the same as that inserted
+ var actual = md5sumFile(putFile);
+ var expected = md5sumFile(getFile);
+
+ assert.eq(actual, expected, 'mismatched md5 sum - expected ' + expected + ' got ' + actual);
+
+ // test put_id with duplicate _id
+ const dupId = '1';
+
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put_id', filesToInsert[0], dupId]
+ .concat(passthrough.args)),
+ 0, 'put_id failed when it should have succeeded');
+
+ const numChunks = db.fs.chunks.count();
+
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put_id', filesToInsert[1], dupId]
+ .concat(passthrough.args)),
+ 0, 'put_id succeeded when it should have failed');
+
+ assert.eq(numChunks, db.fs.chunks.count(), 'existing chunks were modified when they should not have been');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_replace.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_replace.js
new file mode 100644
index 00000000000..5fc3810b9f7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_replace.js
@@ -0,0 +1,79 @@
+// mongofiles_replace.js; ensure that after putting the same file multiple
+// times, a put with --replace replaces any and all occurrences of the given
+// file in the GridFS collection - all other files are left as is
+var testName = 'mongofiles_replace';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --replace option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Running put on file with --replace with ' + passthrough.name + ' passthrough');
+
+ // insert the same file a couple of times
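+ // (GridFS does not enforce unique filenames, so each put creates a new
+ // fs.files document)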
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 2');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 3');
+
+ // ensure each put created a new fs.files document rather than overwriting
+ // the previous one
+
+ assert.eq(db.fs.files.count(), 3, 'expected 3 files inserted but got ' + db.fs.files.count());
+
+ // now run with --replace
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--replace',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 4');
+
+ assert.eq(db.fs.files.count(), 1, 'expected 1 file inserted but got ' + db.fs.files.count());
+
+ // insert other files but ensure only 1 is replaced
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[1]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 5');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[2]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 6');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--replace',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 7');
+
+ assert.eq(db.fs.files.count(), 3, 'expected 3 files inserted but got ' + db.fs.files.count());
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_search.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_search.js
new file mode 100644
index 00000000000..d1b31c7fb0c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_search.js
@@ -0,0 +1,110 @@
+// mongofiles_search.js; ensures that the search command returns any and all
+// files that match the regex supplied
+var testName = 'mongofiles_search';
+(function() {
+ load('jstests/files/util/mongofiles_common.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var conn;
+
+ // hasMatch takes in raw mongofiles search output and a matchItem; it returns
+ // 0 if it finds the match item in any line of the output and 1 otherwise. If
+ // the exactString argument is not empty, hasMatch further checks that the
+ // matching line also matches exactString
+ var hasMatch = function(output, matchItem, exactString) {
+ var lines = output.split('\n');
+ var shellOutputRegex = /^sh.*/;
+ for (var i = 0; i < lines.length; i++) {
+ if (lines[i].match(shellOutputRegex) && lines[i].match(matchItem)) {
+ if (exactString && !lines[i].match(exactString)) {
+ continue;
+ }
+ return 0;
+ }
+ }
+ // matchItem wasn't found
+ return 1;
+ };
+
+ // note - assertHasFiles checks that the output of running mongofiles search
+ // with each of the search strings meets the expected result supplied - 0 if
+ // the string should be found, 1 if it should be missing. If exactString is
+ // not empty, it further checks that the output also matches exactString
+ var assertHasFiles = function(passthrough, searchStrings, expectedResult, exactString) {
+ // perform a couple of search commands against the GridFS collection
+ for (var i = 0; i < searchStrings.length; i++) {
+ clearRawMongoProgramOutput();
+ var queryString = searchStrings[i];
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--quiet',
+ '--port', conn.port,
+ 'search', queryString]
+ .concat(passthrough.args)),
+ 0, 'search command failed on ' + queryString + ' - part of ' + searchStrings);
+
+ // eslint-disable-next-line no-loop-func
+ assert.eq.soon(expectedResult, function() {
+ return hasMatch(rawMongoProgramOutput(), queryString, exactString);
+ }, 'search failed: expected "' + queryString + '" to be ' + (expectedResult ? 'missing' : 'found'));
+ }
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Testing mongofiles search command');
+ var t = topology.init(passthrough);
+ conn = t.connection();
+
+ jsTest.log('Putting files into GridFS with ' + passthrough.name + ' passthrough');
+
+ for (var i = 0; i < filesToInsert.length; i++) {
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[i]]
+ .concat(passthrough.args)),
+ 0, 'put failed on ' + filesToInsert[i] + ' when it should have succeeded');
+ }
+
+ jsTest.log('Searching files in GridFS with ' + passthrough.name + ' passthrough');
+
+ // these search strings should be matched
+ var searchStrings = ['files', '.txt', 'ile', '.'];
+
+ // add the verbatim file names put into GridFS
+ for (i = 0; i < filesToInsert.length; i++) {
+ searchStrings.push(filesToInsert[i]);
+ }
+
+ // all inserted files should be returned
+ assertHasFiles(passthrough, searchStrings, 0);
+
+ // these search strings should NOT be matched
+ searchStrings = ['random', 'always', 'filer'];
+ assertHasFiles(passthrough, searchStrings, 1);
+
+ // test that only the requested file is returned
+ for (i = 0; i < filesToInsert.length; i++) {
+ var currentFile = filesToInsert[i];
+ jsTest.log('Searching for file ' + currentFile + ' with ' + passthrough.name + ' passthrough');
+
+ // ensure the requested file is returned
+ assertHasFiles(passthrough, [currentFile], 0);
+
+ // ensure no other files are returned
+ assertHasFiles(passthrough,
+ // eslint-disable-next-line no-loop-func
+ filesToInsert.filter(function(file) {
+ return file !== currentFile;
+ }), 1, currentFile);
+ }
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_type.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_type.js
new file mode 100644
index 00000000000..7614706edb0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_type.js
@@ -0,0 +1,63 @@
+// mongofiles_type.js; ensure that the given content type is stored when passed
+// as the --type argument. If no argument is passed, it should be omitted in the
+// database.
+var testName = 'mongofiles_type';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --type option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+ var contentType = 'txt';
+
+ jsTest.log('Running put on file with --type with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error with a non-empty --type argument
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '-t', contentType,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+
+ var fileObj = db.fs.files.findOne({
+ filename: filesToInsert[0]
+ });
+
+ assert(fileObj, 'did not find expected GridFS file - ' + filesToInsert[0]);
+
+ assert.eq(fileObj.contentType, contentType, 'unexpected content type - found ' + fileObj.contentType + ' but expected ' + contentType);
+
+ // ensure tool runs without error with empty --type argument on linux
+ // and fails on windows
+ var comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--type', '',
+ 'put', filesToInsert[1]]
+ .concat(passthrough.args)),
+ 0, 'put failed unexpectedly');
+
+ if (!_isWindows()) {
+ fileObj = db.fs.files.findOne({
+ filename: filesToInsert[1]
+ });
+ assert.neq(fileObj, null, 'did not find expected GridFS file - ' + filesToInsert[1]);
+ assert.eq(fileObj.contentType, undefined, 'unexpected content type - found ' + fileObj.contentType + ' but expected undefined');
+ }
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_version.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_version.js
new file mode 100644
index 00000000000..11625ba2a70
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_version.js
@@ -0,0 +1,29 @@
+// mongofiles_version.js; ensure that getting the version works without error
+var testName = 'mongofiles_version';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --version option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ jsTest.log('Testing --version with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--version']
+ .concat(passthrough.args)),
+ 0, '--version failed');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern.js
new file mode 100644
index 00000000000..fea75f5c81b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern.js
@@ -0,0 +1,57 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+ var dbOne = rs.nodes[0].getDB("dbOne");
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['files',
+ '-vvvvv',
+ '-d', 'dbOne']
+ .concat(writeConcern)
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongofiles',
+ '-d', 'dbOne',
+ '--writeConcern={w:3}',
+ '--host', rs.getPrimary().host]
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
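+ // runWCTest (from wc_framework.js) presumably drives writeConcernTestFunc
+ // under a range of write concern settings, using noConnectTest for the case
+ // where the write concern cannot be satisfied, and checks the exit codes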
+ runWCTest("mongofiles", rs, toolTest, writeConcernTestFunc, noConnectTest, testSetup);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js
new file mode 100644
index 00000000000..a2d810fdcad
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js
@@ -0,0 +1,62 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
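+ // the tool connects through the mongos router, which forwards the write
+ // concern on to the shard's replica set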
+ var dbOne = st.s.getDB('dbOne');
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['files',
+ '-vvvvv',
+ '-d', 'dbOne']
+ .concat(writeConcern)
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongofiles',
+ '-d', 'dbOne',
+ '--writeConcern={w:3}',
+ '--host', st.s.host]
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongofiles", rs, toolTest, writeConcernTestFunc, noConnectTest, testSetup);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files1.txt b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files1.txt
new file mode 100644
index 00000000000..e9ea42a12b9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files1.txt
@@ -0,0 +1 @@
+this is a text file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files2.txt b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files2.txt
new file mode 100644
index 00000000000..6d65e626d46
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files2.txt
@@ -0,0 +1 @@
+this is another text file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files3.txt b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files3.txt
new file mode 100644
index 00000000000..181ba5fd828
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/testdata/files3.txt
@@ -0,0 +1 @@
+this is yet another test file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/util/mongofiles_common.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/util/mongofiles_common.js
new file mode 100644
index 00000000000..7ff85d959b1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/files/util/mongofiles_common.js
@@ -0,0 +1,10 @@
+// mongofiles_common.js; contains variables used by mongofiles tests
+load('jstests/common/topology_helper.js');
+
+/* exported filesToInsert */
+// these must have unique names
+var filesToInsert = [
+ 'jstests/files/testdata/files1.txt',
+ 'jstests/files/testdata/files2.txt',
+ 'jstests/files/testdata/files3.txt'
+];
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/all_primaries_down_error_code.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/all_primaries_down_error_code.js
new file mode 100644
index 00000000000..2fe19fc341e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/all_primaries_down_error_code.js
@@ -0,0 +1,65 @@
+/**
+ * all_primaries_down_error_code.js
+ *
+ * This file tests TOOLS-690 where mongoimport returned exit code 0 when it should have returned
+ * exit code 1 on error. The error stems from when mongos cannot find a primary.
+ * This file tests that errors of type 'could not contact primary for replica set' return exit
+ * code 1.
+ */
+(function() {
+ 'use strict';
+ jsTest.log('Testing mongoimport when a sharded cluster has no primaries');
+
+ var sh = new ShardingTest({
+ name: 'all_primaries_down_error_code',
+ shards: 1,
+ verbose: 0,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 3,
+ chunksize: 1,
+ enableBalancer: 0,
+ },
+ });
+
+ // Make sure there is no primary in any replica set.
+ for (var rs of sh._rs) {
+ var ranOutOfPrimaries = false;
+ for (var i = 0; i < rs.nodes.length + 1; i++) {
+ var primary;
+ try {
+ // If we can't find a primary in 20 seconds then assume there are no more.
+ primary = rs.test.getPrimary(20000);
+ } catch (e) {
+ print('Error Finding Primary: ' + e);
+ ranOutOfPrimaries = true;
+ break;
+ }
+
+ jsTest.log('Stepping down ' + primary.host);
+
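+ // note: stepping down closes the primary's open connections, so the command
+ // below may throw even when the step-down itself succeeds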
+ try {
+ primary.adminCommand({replSetStepDown: 300, force: true});
+ } catch (e) {
+ // Ignore any errors that occur when stepping down the primary.
+ print('Error Stepping Down Primary: ' + e);
+ }
+ }
+ // Assert that we left due to running out of primaries and not due to the loop ending.
+ assert(ranOutOfPrimaries,
+ 'Had to kill primary more times than number of nodes in the replset.');
+ }
+
+ // Check that we catch 'could not contact primary for replica set'
+ jsTest.log('All primaries stepped down, trying to import.');
+
+ var ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/boolean_type.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/boolean_type.js
new file mode 100644
index 00000000000..b604bc10c93
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/boolean_type.js
@@ -0,0 +1,57 @@
+/**
+ * boolean_type.js
+ *
+ * This file tests the Boolean() type in mongoimport. A document imported with a
+ * field like Boolean(1) should be treated identically to how the shell would
+ * insert a similar document.
+ */
+
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with various options in the Boolean() type');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+ var testDocs = [
+ {key: 'a', bool: Boolean(1)},
+ {key: 'b', bool: Boolean(0)},
+ {key: 'c', bool: Boolean(140)},
+ {key: 'd', bool: Boolean(-140.5)},
+ {key: 'e', bool: Boolean(Boolean(1))},
+ {key: 'f', bool: Boolean(Boolean(0))},
+ {key: 'g', bool: Boolean('')},
+ {key: 'h', bool: Boolean('f')},
+ {key: 'i', bool: Boolean(null)},
+ {key: 'j', bool: Boolean(undefined)},
+ {key: 'k', bool: Boolean(true)},
+ {key: 'l', bool: Boolean(false)},
+ {key: 'm', bool: Boolean(true, false)},
+ {key: 'n', bool: Boolean(false, true)},
+ {key: 'o', bool: [Boolean(1), Boolean(0), Date(23)]},
+ {key: 'p', bool: Boolean(Date(15))},
+ {key: 'q', bool: Boolean(0x585)},
+ {key: 'r', bool: Boolean(0x0)},
+ {key: 's', bool: Boolean()},
+ ];
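+ // JS truthiness notes: Boolean('') is false but Boolean('f') is true;
+ // Boolean(null), Boolean(undefined) and Boolean() are all false; extra
+ // arguments are ignored, so Boolean(true, false) is true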
+
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', 'jstests/import/testdata/boolean.json',
+ '--db', 'imported',
+ '--collection', 'testcollbool']
+ .concat(commonToolArgs));
+ assert.eq(ret, 0);
+
+ // Confirm that mongoimport imports the testDocs identically to how the shell interprets them.
+ var coll = db1.getSiblingDB('imported').testcollbool;
+ for (var i = 0; i < testDocs.length; i++) {
+ var postImportDoc = coll.findOne({key: testDocs[i].key});
+ assert.eq(testDocs[i].key, postImportDoc.key,
+ 'imported doc ' + testDocs[i].key + ' does not match original');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/collections.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/collections.js
new file mode 100644
index 00000000000..0ea4c87daa9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/collections.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with filenames that map to collection names');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+
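+ // when --collection is omitted, mongoimport derives the collection name from
+ // the file name minus its extension ("foo.blah.json" imports into "foo.blah")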
+ // copy the file to a file that contains the collection name
+ removeFile("foo.blah.json");
+ copyFile(toolTest.extFile, "foo.blah.json");
+
+ // copy the file to a file that contains the collection name plus an extra extension (.backup)
+ removeFile("foo.blah.json.backup");
+ copyFile(toolTest.extFile, "foo.blah.json.backup");
+
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.count(), 2,
+ "importing file named after collection should insert to correct namespace");
+ db1.c.getDB().getSiblingDB("test").foo.blah.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json.backup"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.json.count(), 2,
+ "importing file with extra extension should still assume correct namespace");
+ db1.c.getDB().getSiblingDB("test").foo.blah.json.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json",
+ "--collection", "testcoll1"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").testcoll1.count(), 2,
+ "importing --file with --collection should use correct collection name");
+ db1.c.getDB().getSiblingDB("test").testcoll1.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "foo.blah.json"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.count(), 2,
+ "should be allowed to specify file as positional arg");
+ db1.c.getDB().getSiblingDB("test").foo.blah.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "foo.blah.json",
+ "--db", "testdb2"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("testdb2").foo.blah.count(), 2,
+ "should use database specified by --db");
+ db1.c.getDB().getSiblingDB("testdb2").foo.blah.drop();
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/decimal128.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/decimal128.js
new file mode 100644
index 00000000000..9a9cd02050e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/decimal128.js
@@ -0,0 +1,44 @@
+(function() {
+ // skip this test where NumberDecimal is unsupported (3.2 and earlier)
+ if (typeof NumberDecimal === 'undefined') {
+ return;
+ }
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ jsTest.log('Testing running import with various data types');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var testDoc = {
+ _id: "foo",
+ x: NumberDecimal("124124"),
+ };
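+ // NumberDecimal values round-trip through mongoexport/mongoimport as
+ // extended JSON of the form {"$numberDecimal": "124124"}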
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "initial collection is not empty");
+ db1.c.save(testDoc);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "dec128"]
+ .concat(commonToolArgs));
+ var importedDocs = db1.c.getDB().getSiblingDB("imported").dec128.find().toArray();
+
+ assert.eq(importedDocs.length, 1, "incorrect # of docs imported");
+
+ var importedDoc = importedDocs[0];
+
+ assert.eq(importedDoc, testDoc, "imported doc and test doc do not match");
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/drop.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/drop.js
new file mode 100644
index 00000000000..d845ccbbceb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/drop.js
@@ -0,0 +1,48 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with the --drop option');
+
+ var toolTest = getToolTest('import_writes');
+ var db = toolTest.db.getSiblingDB("droptest");
+ var commonToolArgs = getCommonToolArguments();
+
+ // Verify that --drop works.
+ // put a test doc in the collection, run import with --drop,
+ // make sure that the inserted doc is gone and only the imported
+ // docs are left.
+ db.c.insert({x: 1});
+ assert.eq(db.c.count(), 1, "collection count should be 1 at setup");
+ var ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/csv_header.csv",
+ "--type=csv",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--headerline",
+ "--drop"]
+ .concat(commonToolArgs));
+
+ // test csv file contains 3 docs and collection should have been dropped, so the doc we inserted
+ // should be gone and only the docs from the test file should be in the collection.
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 3);
+ assert.eq(db.c.count({x: 1}), 0);
+
+ // --drop on a non-existent collection should not cause error
+ db.c.drop();
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/csv_header.csv",
+ "--type=csv",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--headerline",
+ "--drop"]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 3);
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/fields.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/fields.js
new file mode 100644
index 00000000000..eaa5a0dc0bf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/fields.js
@@ -0,0 +1,107 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with headerline');
+
+ var formats = ["csv", "tsv"];
+
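+ // dotted field names such as c.xyz and d.hij.lkm are expanded into nested
+ // subdocuments on import, which is what checkCollectionContents verifies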
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({"a": "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, {a: "foo", b: "bar", c: {xyz: "blah"}, d: {hij: {lkm: "qwz"}}});
+ assert.eq(coll.count(), 3);
+ };
+
+ var reset = function(coll) {
+ coll.drop();
+ assert.eq(coll.count(), 0);
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+ for (var i = 0; i < formats.length; i++) {
+ var format = formats[i];
+
+ var c = db1.c.getDB().getSiblingDB(format + "testdb")[format+"testcoll"];
+ // check that headerline uses the correct headers
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_header." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--headerline"]
+ .concat(commonToolArgs));
+
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fields
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fields", "a,b,c.xyz,d.hij.lkm"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fieldFile
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fieldFile", "jstests/import/testdata/fieldfile"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ // check that without --ignoreBlanks, the empty field is just a blank string
+ assert.eq(c.findOne({a: "bob"}).b, "");
+ reset(c);
+
+ // check that --ignoreBlanks causes empty fields to be omitted
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" + format + "_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fieldFile", "jstests/import/testdata/fieldfile",
+ "--ignoreBlanks"]
+ .concat(commonToolArgs));
+ assert.eq(c.findOne({a: "bob"}).b, undefined);
+ reset(c);
+
+ // when --fieldFile, --fields, and --headerline are all omitted,
+ // import should fail
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" + format + "_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+ reset(c);
+
+ }
+
+ var c2 = db1.c.getDB().getSiblingDB("testdb")["extrafields"];
+ // check that extra fields are created as expected
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/extrafields.csv",
+ "--type=csv",
+ "--db", c2.getDB().toString(),
+ "--collection", c2.getName(),
+ "--fieldFile", "jstests/import/testdata/fieldfile"]
+ .concat(commonToolArgs));
+
+ var importedDoc = c2.findOne({"a": "one"});
+ assert.eq(importedDoc.field4, "extra1");
+ assert.eq(importedDoc.field5, "extra2");
+ assert.eq(importedDoc.field6, "extra3");
+ reset(c2);
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_document_validation.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_document_validation.js
new file mode 100644
index 00000000000..64fb5ad02b7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_document_validation.js
@@ -0,0 +1,110 @@
+/**
+ * import_document_validation.js
+ *
+ * This file tests that mongoimport works with document validation. It checks both
+ * that invalid documents are not imported when validation is turned on, and that
+ * all documents are imported when the user chooses to bypass validation.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ /**
+ * Part 1: Test that import follows document validation rules.
+ */
+ jsTest.log('Testing that import reacts well to document validation');
+
+ var toolTest = getToolTest('import_document_validation');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create 1000 documents, half of which will pass the validation
+ var data = [];
+ for (var i = 0; i < 1000; i++) {
+ if (i%2 === 0) {
+ data.push({_id: i, num: i+1, s: '' + i});
+ } else {
+ data.push({_id: i, num: i+1, s: '' + i, baz: i});
+ }
+ }
+ testDB.bar.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(1000, testDB.bar.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', toolTest.extFile,
+ '-d', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'export should run successfully');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(),
+ 'after dropping the database, no documents should be seen');
+
+ // sanity check that we can import the data without validation
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ assert.eq(1000, testDB.bar.count(),
+ 'after import, the documents should be seen again');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(),
+ 'after dropping the database, no documents should be seen');
+
+ // turn on validation
+ var r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation works');
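+ // the validator requires a "baz" field, which only the odd-indexed half of
+ // the generated documents have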
+
+ // test that it's working
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, "invalid documents shouldn't be inserted");
+
+ // import the 1000 records of which only 500 are valid
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret,
+ 'import against a collection with validation on still succeeds');
+
+ assert.eq(500, testDB.bar.count(), 'only the valid documents are imported');
+
+ /**
+ * Part 2: Test that import can bypass document validation rules.
+ */
+ jsTest.log('Testing that bypass document validation works');
+
+ testDB.dropDatabase();
+
+ // turn on validation
+ r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
+
+ // test that we cannot insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, 'invalid documents should not be inserted');
+
+ // import the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar',
+ '--bypassDocumentValidation']
+ .concat(commonToolArgs));
+ assert.eq(0, ret,
+ 'importing documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be imported with bypass document validation set');
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_types.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_types.js
new file mode 100644
index 00000000000..0536ed0caab
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_types.js
@@ -0,0 +1,75 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing importing a json file and checking types');
+
+ var toolTest = getToolTest('import_types');
+
+ // the import file
+ var importFile = 'jstests/import/testdata/types.json';
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('imported');
+ var testColl = testDB.types;
+ testColl.drop();
+ var commonToolArgs = getCommonToolArguments();
+
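+ // Map each imported field to the BSON $type number we expect mongoimport
+ // to produce (e.g. 1 = double, 2 = string, 16 = int32, 18 = int64).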
+ var importTypes = {
+ "double_type": 1,
+ "double_exponent_type": 1,
+ "double_negative_type": 1,
+ "NaN": 1,
+ "infinity": 1,
+ "negative_infinity": 1,
+ "string_type": 2,
+ "object_type": 3,
+ "binary_data": 5,
+ "undefined_type": 6,
+ "object_id_type": 7,
+ "true_type": 8,
+ "false_type": 8,
+ "date_type": 9,
+ "iso_date_type": 9,
+ "null_type": 10,
+ "int32_type": 16,
+ "int32_negative_type": 16,
+ "number_int_type": 16,
+ "int32_hex": 16,
+ "int64_type": 18,
+ "int64_negative_type": 18,
+ "number_long_type": 18,
+ "minkey_type": -1,
+ "maxkey_type": 127,
+ "regex_type": 11,
+ };
+
+ // import the data in from types.json
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', importFile,
+ '--db', 'imported',
+ '--collection', 'types']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ jsTest.log("Imported", importFile);
+
+ var postImportDoc = testColl.findOne();
+ printjson(postImportDoc);
+
+ var docKeys = Object.keys(importTypes);
+
+ for (var i = 0; i < docKeys.length; i++) {
+ jsTest.log("Checking type of", docKeys[i]);
+ var typeNum = importTypes[docKeys[i]];
+ var field = docKeys[i];
+ var query = {};
+ query[field] = {"$type": typeNum};
+ printjson(query);
+ assert.eq(testColl.find(query).count(), 1);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern.js
new file mode 100644
index 00000000000..be72e82f53f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern.js
@@ -0,0 +1,75 @@
+(function() {
+
+ load("jstests/configs/replset_28.config.js");
+
+ var name = 'import_write_concern';
+ var toolTest = new ToolTest(name, null);
+ var dbName = "foo";
+ var colName = "bar";
+ var rs = new ReplSetTest({
+ name: name,
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ var commonToolArgs = getCommonToolArguments();
+ var fileTarget = "wc.csv";
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+
+ var db = rs.getPrimary().getDB(dbName);
+
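+ // Runs mongoimport with the given write-concern arguments appended and
+ // asserts that it exits with the expected code.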
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(writeConcern)
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ db.dropDatabase();
+ }
+
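+ // Spawns mongoimport in the background (without waiting for it to finish)
+ // so the write-concern framework can exercise w:3 while nodes are down.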
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongoimport',
+ '--writeConcern={w:3}',
+ '--host', rs.getPrimary().host,
+ '--file', fileTarget]
+ .concat(commonToolArgs));
+ }
+
+ // create a test collection
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ db.getCollection(colName).insertMany(data);
+ rs.awaitReplication();
+
+ // export the data that we'll use
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ db.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongoimport", rs, toolTest, writeConcernTestFunc, noConnectTest, testSetup);
+
+ db.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern_mongos.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern_mongos.js
new file mode 100644
index 00000000000..ed15ffd4639
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/import_write_concern_mongos.js
@@ -0,0 +1,80 @@
+(function() {
+
+ load("jstests/configs/replset_28.config.js");
+
+ var name = 'import_write_concern';
+ var toolTest = new ToolTest(name, null);
+ var dbName = "foo";
+ var colName = "bar";
+ var fileTarget = "wc_mongos.csv";
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
+
+ var commonToolArgs = getCommonToolArguments();
+ var db = st.s.getDB(dbName);
+
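+ // Runs mongoimport through the mongos with the given write-concern
+ // arguments appended and asserts that it exits with the expected code.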
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(writeConcern)
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ db.dropDatabase();
+ }
+
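+ // Spawns mongoimport against the mongos in the background (without waiting
+ // for it to finish) so the write-concern framework can exercise w:3.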
+ function startProgramNoConnect() {
+ return startMongoProgramNoConnect.apply(null, ['mongoimport',
+ '--writeConcern={w:3}',
+ '--host', st.s.host,
+ '--file', fileTarget]
+ .concat(commonToolArgs));
+ }
+
+ // create a test collection
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ db.getCollection(colName).insertMany(data);
+ rs.awaitReplication();
+
+ // setup: export the data that we'll use
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ db.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongoimport", rs, toolTest, writeConcernTestFunc, startProgramNoConnect, testSetup);
+
+ db.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode.js
new file mode 100644
index 00000000000..6eed10bebd1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode.js
@@ -0,0 +1,147 @@
+(function() {
+ jsTest.log('Testing running import with modes');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing --mode with and without --upsertFields');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+
+ var db = db1.getSiblingDB("upserttest");
+ db.dropDatabase();
+
+ var commonToolArgs = [
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ ].concat(getCommonToolArguments());
+
+ function testWithUpsertFields(expectMode, cmdArg) {
+ // This works by applying update w/ query on the fields
+ db.c.drop();
+ var doc1_origin = {a: 1234, b: "000000", c: 222, x: "origin field"};
+ var doc2_1_origin = {a: 4567, b: "111111", c: 333, x: "origin field"};
+ db.c.insert(doc1_origin);
+ db.c.insert(doc2_1_origin);
+ assert.eq(db.c.count(), 2, "collection count should be 2 at setup");
+
+ var argv = ["import",
+ "--file", "jstests/import/testdata/upsert2.json",
+ "--upsertFields", "a,c"];
+ if (cmdArg) {
+ argv.push(cmdArg);
+ }
+ argv = argv.concat(commonToolArgs);
+ var ret = toolTest.runTool.apply(toolTest, argv);
+ if (expectMode === "error") {
+ return assert.neq(ret, 0);
+ }
+ assert.eq(ret, 0);
+
+ var doc1 = db.c.findOne({a: 1234});
+ var doc1_expect;
+ delete doc1["_id"];
+ switch (expectMode) {
+ case "upsert":
+ doc1_expect = {a: 1234, b: "blah", c: 222};
+ break;
+ case "merge":
+ doc1_expect = {a: 1234, b: "blah", c: 222, x: "origin field"};
+ break;
+ default:
+ throw new Error('unexpected mode: ' + expectMode);
+ }
+ assert.docEq(doc1, doc1_expect);
+
+ var doc2_1 = db.c.findOne({a: 4567, c: 333});
+ var doc2_2 = db.c.findOne({a: 4567, c: 222});
+ delete doc2_1["_id"];
+ delete doc2_2["_id"];
+ var doc2_1_expect, doc2_2_expect;
+ switch (expectMode) {
+ case "upsert":
+ doc2_1_expect = {a: 4567, b: "yyy", c: 333};
+ doc2_2_expect = {a: 4567, b: "asdf", c: 222};
+ break;
+ case "merge":
+ doc2_1_expect = {a: 4567, b: "yyy", c: 333, x: "origin field"};
+ doc2_2_expect = {a: 4567, b: "asdf", c: 222};
+ break;
+ default:
+ throw new Error('unexpected mode: ' + expectMode);
+ }
+ assert.docEq(doc2_1, doc2_1_expect);
+ assert.docEq(doc2_2, doc2_2_expect);
+ }
+
+ function testWithoutUpsertFields(expectMode, cmdArg) {
+ // This works by applying the update using _id
+ db.c.drop();
+ var docOrigin = [
+ {_id: "one", a: "origin value", x: "origin field"},
+ {_id: "two", a: "origin value 2", x: "origin field"},
+ ];
+ db.c.insert(docOrigin[0]);
+ db.c.insert(docOrigin[1]);
+ assert.eq(db.c.count(), 2, "collection count should be 2 at setup");
+
+ var argv = ["import", "--file", "jstests/import/testdata/upsert1.json"];
+ if (cmdArg) {
+ argv.push(cmdArg);
+ }
+ argv = argv.concat(commonToolArgs);
+ var ret = toolTest.runTool.apply(toolTest, argv);
+ if (expectMode === "error") {
+ return assert.neq(ret, 0);
+ }
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 2);
+
+ var docs = [
+ db.c.findOne({_id: "one"}),
+ db.c.findOne({_id: "two"}),
+ ];
+ var docExpects = [];
+ switch (expectMode) {
+ case "insert":
+ docExpects = docOrigin;
+ break;
+ case "upsert":
+ docExpects = [
+ {_id: "one", a: "unicorns", b: "zebras"},
+ {_id: "two", a: "xxx", b: "yyy"},
+ ];
+ break;
+ case "merge":
+ docExpects = [
+ {_id: "one", a: "unicorns", b: "zebras", x: "origin field"},
+ {_id: "two", a: "xxx", b: "yyy", x: "origin field"},
+ ];
+ break;
+ default:
+ throw new Error('unexpected mode: ' + expectMode);
+ }
+ assert.docEq(docs, docExpects);
+ }
+
+ // first argument: expected behavior
+ // second argument: extra command-line flag passed to mongoimport
+
+ testWithUpsertFields("error", "--mode=wrong");
+ testWithUpsertFields("error", "--mode=insert");
+ testWithUpsertFields("upsert", "");
+ testWithUpsertFields("upsert", "--upsert"); // deprecated cmdArg
+ testWithUpsertFields("upsert", "--mode=upsert");
+ testWithUpsertFields("merge", "--mode=merge");
+
+ testWithoutUpsertFields("error", "--mode=wrong");
+ testWithoutUpsertFields("insert", "--mode=insert");
+ testWithoutUpsertFields("insert", "");
+ testWithoutUpsertFields("upsert", "--upsert"); // deprecated cmdArg
+ testWithoutUpsertFields("upsert", "--mode=upsert");
+ testWithoutUpsertFields("merge", "--mode=merge");
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode_upsert_id_subdoc.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode_upsert_id_subdoc.js
new file mode 100644
index 00000000000..7a935472d3d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/mode_upsert_id_subdoc.js
@@ -0,0 +1,86 @@
+(function() {
+ // This test creates a collection with a subdocument _id field. We export the collection,
+ // replace the existing documents with a pre-made dataset and --mode=upsert, then overwrite
+ // that with the original data, again with --mode=upsert. This verifies that import and
+ // export do not change the order of _id fields.
+ jsTest.log('Testing running import with --mode=upsert and _id subdocuments');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var db = db1.getSiblingDB("upserttest");
+ db.dropDatabase();
+
+ // create a set of documents with a subdocument _id
+ var h, i, j;
+ for (h = 0; h < 2; h++) {
+ var data = [];
+ for (i = h * 50; i < (h+1) * 50; i++) {
+ for (j = 0; j < 20; j++) {
+ data.push({
+ _id: {
+ a: i,
+ b: [0, 1, 2, {c: j, d: "foo"}],
+ e: "bar",
+ },
+ x: "string",
+ });
+ }
+ }
+ db.c.insertMany(data);
+ }
+ assert.eq(db.c.count(), 2000);
+
+ jsTest.log('Exporting documents with subdocument _ids.');
+ var ret = toolTest.runTool.apply(toolTest, ["export",
+ "-o", toolTest.extFile,
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "export should succeed");
+
+ jsTest.log('Upserting pre-made documents with subdocument _ids.');
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/upsert3.json",
+ "--mode=upsert",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "import should succeed");
+ assert.eq(db.c.count(), 2000,
+ "count should be the same before and after import");
+
+ // check each document
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 20; j++) {
+ assert.eq(db.c.findOne({_id: {a: i, b: [0, 1, 2, {c: j, d: "foo"}], e: "bar"}}).x, "str2",
+ "all documents should be updated");
+ }
+ }
+
+ jsTest.log('Upserting original exported documents with subdocument _ids.');
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--mode=upsert",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "import should succeed");
+ assert.eq(db.c.count(), 2000,
+ "count should be the same before and after import");
+
+ // check each document to see that it is back at its original value
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 20; j++) {
+ assert.eq(db.c.findOne({_id: {a: i, b: [0, 1, 2, {c: j, d: "foo"}], e: "bar"}}).x, "string",
+ "all documents should be updated");
+ }
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/no_primary_error_code.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/no_primary_error_code.js
new file mode 100644
index 00000000000..8d9c4c5e451
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/no_primary_error_code.js
@@ -0,0 +1,65 @@
+/**
+ * no_primary_error_code.js
+ *
+ * This file tests TOOLS-690, where mongoimport returned exit code 0 when it should have
+ * returned exit code 1 on error. The error arises when mongos cannot find a primary. This
+ * file checks that errors of type 'not master', 'unable to target', and 'Connection refused'
+ * all yield exit code 1.
+ */
+(function() {
+ 'use strict';
+ jsTest.log('Testing mongoimport when a sharded cluster has no primaries');
+
+ var sh = new ShardingTest({
+ name: 'no_primary_error_code',
+ shards: 1,
+ verbose: 0,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 1,
+ chunksize: 1,
+ enableBalancer: 0,
+ },
+ });
+
+ // If we can't find a primary in 20 seconds, then assume there are none.
+ var primary = sh.rs0.getPrimary(20000);
+
+ jsTest.log('Stepping down ' + primary.host);
+
+ try {
+ primary.adminCommand({replSetStepDown: 300, force: true});
+ } catch (e) {
+ // Ignore any errors that occur when stepping down the primary.
+ print('Error Stepping Down Primary: ' + e);
+ }
+
+ // Check that we catch 'not master'
+ jsTest.log('All primaries stepped down, trying to import.');
+
+ var ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.getDB('test').dropDatabase();
+
+ // Kill the replica set.
+ sh.rs0.stopSet(15);
+
+ // Check that we catch 'Connection refused'
+ jsTest.log('All primaries died, trying to import.');
+
+ ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/options.js
new file mode 100644
index 00000000000..c9027313210
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/options.js
@@ -0,0 +1,123 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing running import with bad command line options');
+
+ var toolTest = getToolTest('bad_options');
+ var db1 = toolTest.db;
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool("export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName());
+
+ // also make a CSV version of it
+ toolTest.runTool("export",
+ "--out", toolTest.extFile + ".csv",
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName(),
+ "--fields", "a,b,c",
+ "--csv");
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+ // verify that the normal sane case works
+ var ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test");
+ assert.eq(ret, 0);
+
+ var testDb = db1.c.getDB().getSiblingDB("test");
+ assert.eq.soon(2, testDb.test.count.bind(testDb.test), "test.test should have 2 records");
+ testDb.test.drop();
+
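+ // Each scenario below lists mongoimport arguments that should be rejected;
+ // the loop at the bottom runs them all and expects a nonzero exit code.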
+ var testScenarios = [
+ {args: [],
+ desc: "importing with no args should fail"},
+
+ {args: [toolTest.extFile, toolTest.extFile],
+ desc: "importing with multiple positional args should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, toolTest.extFile],
+ desc: "specifying both a --file and a positional argument should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", "non-existent-file.json"],
+ desc: "specifying a --file with a nonexistent filename should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", "."],
+ desc: "specifying a --file with a directory name should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type", "bogus"],
+ desc: "importing with an invalid --type should fail"},
+
+ {args: ["--db", "x.y.z", "-c", "test", "--file", toolTest.extFile],
+ desc: "importing with an invalid database name (. in name) should fail"},
+
+ {args: ["--db", "$x", "-c", "test", "--file", toolTest.extFile],
+ desc: "importing with an invalid database name ($ in name) should fail"},
+
+ {args: ["--db", "test", "-c", "blah$asfsaf", "--file", toolTest.extFile],
+ desc: "importing with an invalid collection name should fail"},
+
+ {args: ["--db", "test", "-c", "blah$asfsaf", "--file", toolTest.extFile],
+ desc: "importing with an invalid collection name should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,$xz,b"],
+ desc: "--fields containing a field containing a $ should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=json", "--fields", "a,b"],
+ desc: "specifying --fields with --json should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--headerline", "--fields", "a,b", "--type=csv"],
+ desc: "specifying both --fields and --headerline should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b", "--fieldFile", toolTest.extFile + ".csv"],
+ desc: "specifying both --fields and --fieldFile should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--headerline", "--fieldFile", toolTest.extFile + ".csv"],
+ desc: "specifying both --headerline and --fieldFile should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b,b"],
+ desc: "--fields with duplicate field names should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b,b.c"],
+ desc: "--fields with field names of overlapping structures should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=csv", "--fields", "a,b,b.c"],
+ desc: "--fields with field names of overlapping structures should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--upsertFields", "a,$b"],
+ desc: "invalid characters in upsertFields should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--jsonArray"],
+ desc: "using --jsonArray with a non-array input file should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=json"],
+ desc: "using --type=json with invalid json should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=csv", "--fields=a,b,c"],
+ desc: "using --type=csv with invalid csv should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=json", "--headerline"],
+ desc: "using --type=json with headerline should fail"},
+ ];
+
+ for (var i=0; i<testScenarios.length; i++) {
+ jsTest.log('Testing: ' + testScenarios[i].desc);
+ ret = toolTest.runTool.apply(toolTest, ["import"].concat(testScenarios[i].args));
+ assert.neq(0, ret, i + ": " + testScenarios[i].desc);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/parse_grace.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/parse_grace.js
new file mode 100644
index 00000000000..5726ecaafd9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/parse_grace.js
@@ -0,0 +1,113 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
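+ // The documents we expect when every field of parse_grace.csv is coerced
+ // successfully; the second row's date is deliberately unparseable.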
+ var expectedDocs = [{
+ a: "foo",
+ b: 12,
+ c: {
+ xyz: ISODate("1997-06-02T15:24:00Z"),
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M=")}},
+ }, {
+ a: "bar",
+ b: 24,
+ c: {
+ xyz: "06/08/2016 09:26:00",
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }, {
+ a: "baz",
+ b: 36,
+ c: {
+ xyz: ISODate("2016-06-08T09:26:00Z"),
+ noop: false,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }];
+ jsTest.log('Testing parseGrace option');
+
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({a: "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[0]);
+ importedDoc = coll.findOne({a: "baz"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[2]);
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var commonToolArgs = getCommonToolArguments();
+ var d = toolTest.db;
+ var c;
+
+ // parseGrace=stop should cause a failure
+ c = d.testcoll1;
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", d.getName(),
+ "--collection", c.getName(),
+ "--columnsHaveTypes",
+ "--parseGrace", "stop",
+ "--headerline"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+
+ // parseGrace=skipRow should not import the row
+ // with an uncoercible field
+ c = d.testcoll2;
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", d.getName(),
+ "--collection", c.getName(),
+ "--columnsHaveTypes",
+ "--parseGrace", "skipRow",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 2);
+
+ // parseGrace=skipField should not import the
+ // uncoercible field, but should keep the rest
+ // of the row
+ c = d.testcoll3;
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", d.getName(),
+ "--collection", c.getName(),
+ "--columnsHaveTypes",
+ "--parseGrace", "skipField",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 3);
+ assert.neq(c.findOne({a: "bar"}), null);
+ assert.eq(c.findOne({a: "bar"}).c.xyz, undefined);
+
+ // parseGrace=autoCast should import the uncoercible field
+ c = d.testcoll4;
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", d.getName(),
+ "--collection", c.getName(),
+ "--columnsHaveTypes",
+ "--parseGrace", "autoCast",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 3);
+ var importedDoc = c.findOne({a: "bar"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[1]);
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/replset.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/replset.js
new file mode 100644
index 00000000000..7d7fd1c73f3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/replset.js
@@ -0,0 +1,48 @@
+(function() {
+ jsTest.log('Testing running import against a replica set');
+
+ var toolTest = new ToolTest('import_repl');
+
+ var replset1 = new ReplSetTest({nodes: 3, name: 'importtest'});
+ replset1.startSet();
+ replset1.initiate();
+
+ var primary = replset1.getPrimary();
+ var secondary = replset1.getSecondary();
+
+ var db = primary.getDB('import_repl_test');
+
+ // trying to write to the secondary should fail
+ assert.neq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', secondary.host]), 0,
+ "writing to secondary should fail");
+
+ assert.eq(db.c.count(), 0, 'database not empty');
+
+ // now import using the primary
+ assert.eq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', primary.host]), 0,
+ "writing to primary should succeed");
+
+ assert.neq(db.c.count(), 0, 'database unexpectedly empty on primary');
+
+ db.dropDatabase();
+
+ // import using the secondary but include replset name, should succeed
+ assert.eq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', replset1.name + "/" + secondary.host]), 0,
+ "writing to secondary with replset name should succeed");
+
+ assert.neq(db.c.count(), 0, 'database unexpectedly empty on secondary');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/stoponerror.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/stoponerror.js
new file mode 100644
index 00000000000..acf6e7e5437
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/stoponerror.js
@@ -0,0 +1,40 @@
+(function() {
+ jsTest.log('Testing running import with --stopOnError');
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('import_dupes');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var db = db1.getSiblingDB("dupetest");
+ db.dropDatabase();
+
+ // Seed a document whose _id collides with one in dupes.json
+ db.c.insert({_id: 1234, b: "000000", c: 222});
+ assert.eq(db.c.count(), 1, "collection count should be 1 at setup");
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/dupes.json",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--stopOnError"]
+ .concat(commonToolArgs));
+
+ assert.neq(ret, 0,
+ "duplicate key with --stopOnError should return nonzero exit code");
+
+ // drop it, try again without stop on error
+ db.c.drop();
+ db.c.insert({_id: 1234, b: "000000", c: 222});
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/dupes.json",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0,
+ "duplicate key without --stopOnError should return zero exit code");
+ assert.docEq(db.c.findOne({_id: 1234}), {_id: 1234, b: "000000", c: 222});
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_header.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_header.csv
new file mode 100644
index 00000000000..4c308f094b1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_header.csv
@@ -0,0 +1,4 @@
+a,b,c.xyz,d.hij.lkm
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_noheader.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_noheader.csv
new file mode 100644
index 00000000000..15427ed2b89
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/csv_noheader.csv
@@ -0,0 +1,3 @@
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/extrafields.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/extrafields.csv
new file mode 100644
index 00000000000..945dedb557a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/extrafields.csv
@@ -0,0 +1,3 @@
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four,extra1,extra2,extra3
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/fieldfile b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/fieldfile
new file mode 100644
index 00000000000..d08b7dd21f5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/fieldfile
@@ -0,0 +1,4 @@
+a
+b
+c.xyz
+d.hij.lkm
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/parse_grace.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/parse_grace.csv
new file mode 100644
index 00000000000..447c1bd647e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/parse_grace.csv
@@ -0,0 +1,4 @@
+a.string(),b.int32(),"c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)",c.noop.boolean(),d.hij.lkm.binary(hex)
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"06/08/2016 09:26:00",true,746573740a
+baz,36,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_header.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_header.tsv
new file mode 100644
index 00000000000..d10280f5e04
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_header.tsv
@@ -0,0 +1,4 @@
+a b c.xyz d.hij.lkm
+foo bar blah qwz
+bob steve sue
+one two three four
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv
new file mode 100644
index 00000000000..3729293b3ac
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv
@@ -0,0 +1,3 @@
+foo bar blah qwz
+bob steve sue
+one two three four
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv
new file mode 100644
index 00000000000..8d398a3745f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv
@@ -0,0 +1,3 @@
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
+one,2,"May 08, 2016 09:26:00",false,746573740a,extra1,extra2
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv
new file mode 100644
index 00000000000..a4ca42f4589
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv
@@ -0,0 +1,3 @@
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
+one 2 May 08, 2016 09:26:00 false 746573740a extra1 extra2
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.csv
new file mode 100644
index 00000000000..1140f31b20b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.csv
@@ -0,0 +1,3 @@
+a.string(),b.int32(),"c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)",c.noop.boolean(),d.hij.lkm.binary(hex)
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.tsv
new file mode 100644
index 00000000000..a80b16848be
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_header.tsv
@@ -0,0 +1,3 @@
+a.string() b.int32() c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss) c.noop.boolean() d.hij.lkm.binary(hex)
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.csv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.csv
new file mode 100644
index 00000000000..50eeda2d83f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.csv
@@ -0,0 +1,2 @@
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv
new file mode 100644
index 00000000000..a4eb1896c1c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv
@@ -0,0 +1,2 @@
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typedfieldfile b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typedfieldfile
new file mode 100644
index 00000000000..0068166003f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/testdata/typedfieldfile
@@ -0,0 +1,5 @@
+a.string()
+b.int32()
+c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)
+c.noop.boolean()
+d.hij.lkm.binary(hex)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/type_case.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/type_case.js
new file mode 100644
index 00000000000..755031f2f0d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/type_case.js
@@ -0,0 +1,98 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing that --type values are case-insensitive');
+
+ var toolTest = getToolTest('bad_options');
+ var db1 = toolTest.db;
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool("export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName());
+
+ // also make a CSV version of it
+ toolTest.runTool("export",
+ "--out", toolTest.extFile + ".csv",
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName(),
+ "--csv",
+ "--fields", "a,b,c");
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+ // verify that the normal sane case works
+ var ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test");
+ assert.eq(ret, 0);
+
+ // verify that a lower-case json type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=json");
+ assert.eq(ret, 0);
+
+ // verify that an upper-case json type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=JSON");
+ assert.eq(ret, 0);
+
+ // verify that a csv type specifier fails to load a json file
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=csv",
+ "-f", "a,b,c");
+ assert.eq(ret, 1);
+
+ // verify that a lower-case csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=csv",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ // verify that an upper-case csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=CSV",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ // verify that a mixed-case csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=cSv",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ var testDb = db1.c.getDB().getSiblingDB("test");
+ assert.eq.soon(11, testDb.test.count.bind(testDb.test), "test.test should have 11 records");
+ testDb.test.drop();
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/typed_fields.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/typed_fields.js
new file mode 100644
index 00000000000..ce290e56995
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/typed_fields.js
@@ -0,0 +1,114 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var formats = ["csv", "tsv"];
+ var header = "a.string(),b.int32(),c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss),c.noop.boolean(),d.hij.lkm.binary(hex)";
+ var expectedDocs = [{
+ a: "foo",
+ b: 12,
+ c: {
+ xyz: ISODate("1997-06-02T15:24:00Z"),
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M=")}},
+ }, {
+ a: "bar",
+ b: 24,
+ c: {
+ xyz: ISODate("2016-06-08T09:26:00Z"),
+ noop: false,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }];
+ jsTest.log('Testing typed fields in CSV/TSV');
+
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({a: "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[0]);
+ importedDoc = coll.findOne({a: "bar"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[1]);
+ assert.eq(coll.count(), 2);
+ };
+
+ var reset = function(coll) {
+ coll.drop();
+ assert.eq(coll.count(), 0);
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
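+ // Run the same set of checks against both csv and tsv input files.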
+ for (var i=0; i<formats.length; i++) {
+ var format=formats[i];
+
+ var c = db1.c.getDB().getSiblingDB(format + "testdb")[format+"testcoll"];
+ // check that headerline uses the correct headers
+ var ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_header." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--headerline"]
+ .concat(commonToolArgs));
+
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fields
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fields", header]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fieldsFile
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fieldFile", "jstests/import/testdata/typedfieldfile"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // when --fieldFile, --fields, and --headerline are all omitted,
+ // import should fail
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+ reset(c);
+
+ // check that extra fields are created as expected
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_extrafields." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fieldFile", "jstests/import/testdata/typedfieldfile"]
+ .concat(commonToolArgs));
+
+ var importedDoc = c.findOne({"a": "one"});
+ assert.eq(importedDoc.field5, "extra1");
+ assert.eq(importedDoc.field6, "extra2");
+ reset(c);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/types.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/types.js
new file mode 100644
index 00000000000..6ecc7129387
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/import/types.js
@@ -0,0 +1,117 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ jsTest.log('Testing running import with various data types');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var testDoc = {
+ _id: ObjectId(),
+ a: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M="),
+ b: Boolean(1),
+ d: "this is a string",
+ e: ["this is an ", 2, 23.5, "array with various types in it"],
+ f: {"this is": "an embedded doc"},
+ g: function () {
+ print("hey sup");
+ },
+ h: null,
+ i: true,
+ j: false,
+ k: NumberLong(10000),
+ l: MinKey(),
+ m: MaxKey(),
+ n: ISODate("2015-02-25T16:42:11Z"),
+ o: DBRef('namespace', 'identifier', 'database'),
+ p: NumberInt(5),
+ q: 5.0,
+ };
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save(testDoc);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "testcoll2"]
+ .concat(commonToolArgs));
+ var postImportDoc = db1.c.getDB().getSiblingDB("imported").testcoll2.findOne();
+
+ printjson(postImportDoc);
+
+ for (var docKey in testDoc) {
+ if (!testDoc.hasOwnProperty(docKey)) {
+ continue;
+ }
+ jsTest.log("checking field " + docKey);
+ if (typeof testDoc[docKey] === 'function') {
+ // SERVER-23472: As of 3.3.5, JS functions are serialized when inserted,
+ // so accept either the original function or its serialization
+ try {
+ assert.eq(testDoc[docKey], postImportDoc[docKey],
+ "function does not directly match");
+ } catch (e) {
+ assert.eq({code: String(testDoc[docKey])}, postImportDoc[docKey],
+ "serialized function does not match");
+ }
+ continue;
+ }
+ assert.eq(testDoc[docKey], postImportDoc[docKey],
+ "imported field " + docKey + " does not match original");
+ }
+
+ // DBPointer should turn into a DBRef with a $ref field and the $id field being an ObjectId.
+ // It will not convert back to a DBPointer.
+
+ var oid = ObjectId();
+ var irregularObjects = {
+ _id: ObjectId(),
+ a: DBPointer('namespace', oid),
+ b: NumberInt("5"),
+ c: NumberLong("5000"),
+ d: 5,
+ e: 9223372036854775,
+ };
+
+ db1.c.drop();
+ db1.c.getDB().getSiblingDB("imported").testcoll3.drop();
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save(irregularObjects);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "testcoll3"]
+ .concat(commonToolArgs));
+ postImportDoc = db1.c.getDB().getSiblingDB("imported").testcoll3.findOne();
+
+ printjson(postImportDoc);
+
+ var dbRef = DBRef("namespace", oid);
+ assert.eq(postImportDoc["a"], dbRef);
+
+ assert.eq(postImportDoc["b"], 5);
+ assert.eq(postImportDoc["d"], 5);
+
+ var numLong = NumberLong(5000);
+ assert.eq(postImportDoc["c"], numLong);
+
+ numLong = NumberLong(9223372036854775);
+ assert.eq(postImportDoc["e"], numLong);
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/.eslintrc.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/.eslintrc.yml
new file mode 100644
index 00000000000..1750fda88a5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/.eslintrc.yml
@@ -0,0 +1,3 @@
+rules:
+ no-unused-vars: 0
+ no-empty-function: 0
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/analyze_plan.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..b930470fdb5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/analyze_plan.js
@@ -0,0 +1,76 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ } else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ } else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
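+// Example (illustrative): given a winning plan from explain(), e.g.
+//   var plan = db.coll.find({a: 1}).explain().queryPlanner.winningPlan;
+//   planHasStage(plan, "IXSCAN"); // true when an index scan is used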
+
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ } else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ } else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+ skips += getChunkSkips(root.inputStages[i]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/authTestsKey b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/badSAN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/badSAN.pem
new file mode 100644
index 00000000000..d8e362731e0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/badSAN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgIDAYKXMA0GCSqGSIb3DQEBBQUAMHQxFzAVBgNVBAMTDktl
+cm5lbCBUZXN0IENBMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzAeFw0xNDA5MjMxNTE3MjNaFw0zNDA5MjMxNTE3MjNaMG8xEjAQBgNV
+BAMTCTEyNy4wLjAuMTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCDB/lxuzeU
+OHR5nnOTJM0fHz0WeicnuUfGG5wP89Mbkd3Y+BNS0ozbnkW+NAGhD+ehNBjogISZ
+jLCd+uaYu7TLWpkgki+1+gM99Ro0vv7dIc8vD7ToILKMbM8xQmLbSxDT2tCUoXlc
+m7ccgDZl9oW1scQYQ8gWHjmk3yK8sCoGa/uwr49u74aVM7673tLsK41m8oYPzt/q
+VGT+mXpBJQcGXkTNQtIPxBtD25jr+aPietS3u70zrVPY6ZDsGE7DofEeRl97kVoF
+NcpaQmVEwEo8KCWaT6OaPaUUUjAMwzqiZaHNZ6mL1pCr65bLXP6T9tiMtWLw5+SG
+3E09fhQuWod5AgMBAAGjFTATMBEGA1UdEQQKMAiCBmJhZFNBTjANBgkqhkiG9w0B
+AQUFAAOCAQEAQzlibJvlUpJG3vc5JppdrudpXoVAP3wtpzvnkrY0GTWIUE52mCIf
+MJ5sARvjzs/uMhV5GLnjqTcT+DFkihqKyFo1tKBD7LSuSjfDvjmggG9lq0/xDvVU
+uczAuNtI1T7N+6P7LyTG4HqniYouPMDWyCKBOmzzNsk+r1OJb6cxU7QQwmSWw1n1
+ztNcF6JzCQVcd9Isau9AEXZ9q0M0sjD9mL67Qo3Dh3Mvf4UkJKqm3KOQOupUHZLU
+vJwfsS2u+gfHY1Plywzq3AuT7ygbksR3Pqfs8LFPnuRAH+41sFTGUM52hiU7mNPj
+ebl8s1tjK7WQ+a8GTABJV0hDNeWd3Sr+Og==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAgwf5cbs3lDh0eZ5zkyTNHx89FnonJ7lHxhucD/PTG5Hd2PgT
+UtKM255FvjQBoQ/noTQY6ICEmYywnfrmmLu0y1qZIJIvtfoDPfUaNL7+3SHPLw+0
+6CCyjGzPMUJi20sQ09rQlKF5XJu3HIA2ZfaFtbHEGEPIFh45pN8ivLAqBmv7sK+P
+bu+GlTO+u97S7CuNZvKGD87f6lRk/pl6QSUHBl5EzULSD8QbQ9uY6/mj4nrUt7u9
+M61T2OmQ7BhOw6HxHkZfe5FaBTXKWkJlRMBKPCglmk+jmj2lFFIwDMM6omWhzWep
+i9aQq+uWy1z+k/bYjLVi8OfkhtxNPX4ULlqHeQIDAQABAoIBAC4Bx8jyJmKpq+Pk
+CcqZelg6HLXesA7XlGbv3M0RHIeqoM2E1SwYd5LJMM3G7ueBcR/97dz8+xH6/yyJ
+Ixxvk9xu9CMmkRABN9AyVkA867nzHA73Idr7WBXMQreWCqXa5o6sXt5BEB6/If0k
+23TTqUERqLuoWQHDHRRRsJ218RuNmbvBe8TGXcfunC0eeDVKDeqAXol6bD5lztdu
+B6jkdLt5UZSQ7X8OmClbeDlac90B8usNi+pUE9q1p7X462vAw8LohkxLY2nyIcmU
+feNdTNHP+lklv+E+p9w/Az7Hf6zxm525tw90QVI048fr9SL3ftLHOt4FhucSCn0Z
+CjylP4ECgYEA+nQrNVdVwmxcWCVn69LR1grNXUSz+fLHCo+QKma4IyC1kuuZ+BBo
+Iwdf9t/S1tgtTYru3uxzCpQg7J1iDeEFEsMHl0rc6U1MmIE+6OvACVG3yotqoOqE
+852pi1OWIe94yTk2ZmNXJ8gpUE/gtMprbcSWOb7IzzrXy2lDcaEMuGkCgYEAhe7L
+ZvYI4LEvu6GSPp97qBzDH9m5UrHaTZIJk/Nu7ie919Sdg62LTfphsaK+pSyA55XQ
+8L9P7wNUPC44NnE+7CIJZsIuKdYqR5QI6No9RdTyij0Hgljfc7KuH2b8lf8EjvuH
+qZAf5zL3pIOQs8E8/MYHlGIqmTkYK41eCAcS9JECgYEADnra6KmU9rmnGR2IhZTZ
+tuNG/kZzlVbY9R5ZumnX6YgBl23xp+ri6muJu88y9GLpM5t9tfu7pvfrc2KiAaVp
+0qzd6nxUi1SBwituxK6kmqVT1+z5jDYi26bY34pEms+qjw+0unSx3EXxRYhouGsf
+jOgZu1rxZzHCuirq0E38W0kCgYBzOK16RX37t9OFywlioJekWCIxu4BouSNCirl8
+s/eiIUR8cqiUCPAIRLhZNtZmiTPYiBW5mAyvZiDIqUao56InSVznL3TBf0LeU2ea
+023VLs79yGU2aTjLc1PDJjl03XDRhWj/okMgBsPvn1QUoNDT8ZXBvPZC3VCC31qe
+818GUQKBgQDBUP2BC/Th/0dErOQ5lWkY3YbmzrTp2pDsHGZJRD+OdQ5B8FUvCP8m
+JESk/0ATn7niUqawnOy/2KlKIkeBBV2XL1rjIGEhCkBUuhCiInNDqz1AGdXzIKaT
+myoZ4PhIsH1D643e6iLhyAZuUAA4yB31E2a3l7EMyhV3vKbdWWygGQ==
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/ca.pem
new file mode 100644
index 00000000000..eedfb473d4b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/ca.pem
@@ -0,0 +1,102 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml ca.pem
+#
+# Primary Root Certificate Authority Most Certificates are issued by this CA.
+-----BEGIN CERTIFICATE-----
+MIIDdDCCAlwCBBmRIxIwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjczOVoXDTM5MDkyNzIzMjczOVowdDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5l
+bCBUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAupVkx8+n
+AqzsANKwNPeCYlf2q0WgF4kSUMNJdpmMelrr7hh7EOnAU0hTAQx9BKTEbExeCzH6
+OArFNGjewjWVXwaOpCjK8FMvK6/lGVEpmoHNF9XuiQVmaQ4bJD6rC73YjpgNIPeL
+5PyoFLEZv+X2cRBPpTcSRcf87tk8HL7v0eyk1JBhkeKK68SYdWwZlHaa1jqwmliW
+WvVMkHVH3lx0VOgQwWtOgs0K1zpcZ0sH5MGpYRQOiidIRZj3PkKeTPQe2D6VQQtv
+2yDs9dWfCxJJP9QiWclL2rF/xqlFSNEIfNZpZhk6I1DHQpA2uyJfzRH62pFasJuB
+CVh5Tr0EDoVreQIDAQABoxMwETAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
+CwUAA4IBAQARdNCYYWxi2fyhJwzGHwIT261d/pTlOSYLlm84c72aEneFUnfp8/H5
+JjuFbnhiX+5+h3M7eDQhra9s+H3vKr7o38EIVf5OKXvpNLwv1UUmomBvKqccioYh
+bxrfwCzfBRuUmW05kcAVn8iKovqyxL7npEZbckwtT+BqZ4kOL4Uzre+S1HMx0zOu
+xulSYA/sBoJ2BB93ZIAqB+f/+InS9yggzyhhaQqS7QEl1L4nZE4Oy0jKcxdCzysm
+TqiyH+OI5SVRTfXh4XvHmdWBBaQyaTmQzXYUxUi7jg1jEAiebCGrEJv9plwq4KfC
+cze9NLBjaXR3GzonT8kICyVT/0UvhuJg
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6lWTHz6cCrOwA
+0rA094JiV/arRaAXiRJQw0l2mYx6WuvuGHsQ6cBTSFMBDH0EpMRsTF4LMfo4CsU0
+aN7CNZVfBo6kKMrwUy8rr+UZUSmagc0X1e6JBWZpDhskPqsLvdiOmA0g94vk/KgU
+sRm/5fZxEE+lNxJFx/zu2Twcvu/R7KTUkGGR4orrxJh1bBmUdprWOrCaWJZa9UyQ
+dUfeXHRU6BDBa06CzQrXOlxnSwfkwalhFA6KJ0hFmPc+Qp5M9B7YPpVBC2/bIOz1
+1Z8LEkk/1CJZyUvasX/GqUVI0Qh81mlmGTojUMdCkDa7Il/NEfrakVqwm4EJWHlO
+vQQOhWt5AgMBAAECggEATMiSEtBXsHgtHyGVNhbtZz8/2bfrbAQSr33OS6pg7zFf
+ijo02v73EM7gqbvT7wdHdjHvObg1er6j7S+DDHU0pUhzhwbqjQAOl3HpXAPUlSr5
+leeKJIU+YdlWPhI0Hb7g0b2tGXYF8hQvr2Q6bohaZKkLvbfuIx1r73cij8nbs411
+MU892GBVZRcMGITLT7W1BFWYGmjqIH7FabSpFpIxeEX2ONooHHOYBlj3dJs8WdGr
+/gGJSYq1YGUbVQLIn5m9JmYuFShEhcSrGVAdKto2qRqUpTaePXLU8dozInejFMVb
+yul5fwPuUGgDz+x6wKWRSA8138uaEHQl7r5DqOv6oQKBgQDkYg37TdSC0cCWxL/J
+vFzMPQ/p9iT8ZgOA1lvUUkpwhGOdAqAiR35vZPwFtu3sSl/BUER7ZDf+ILfdPzW5
+wFiWWAkrS8jWl9RaFwkjeTq5fwv3kJGwPwVvCzjLjX7tUDCJv8azOuIxoindCnnC
+y5HXm6hOQZS26lZqNDzsBFzWdQKBgQDRJV9+7gRyZhl8PIBO91Hl5C+wTBFjFFrH
+TJRnT0pNX5FRkPZoyPgJD44S0zFg4oWKl1r2YvfwCQZtQl3h2ZDDGnEE7mJ0++hi
++UUfLyF9dXq27aK4mJsDkdp5Hi6vqfBETPPPyHffwY/UFLOsseqU+5aVG/7Hk6Th
+r2jQzNzIdQKBgQCx1SRb9YuvXdKf044fcNu1cSquHCtKmcjKjp+soXMzT/Mc9Elq
+x87MwI55iKqU3ojXR0A78Jqk+CcTUT/tZrfUQbLKEtXNOyZwDBXimGhAvou79sdq
+vHfnso5D+ebGtTVGXZ1EPGqbCVGdu6V6J/dlMuCIJwq8r5YgVpLFmNQNbQKBgQCD
+PNRjra+UqYSsdLp+0qrNexaBpfnzFj6weNlWymZxjtp9OF2m+xTJer1ICsYdaYcq
+pDcsDIZQVRl3TgHjSsaP5vOsiHm3tqroEErTepulQiayywMkmn4LC2bwQYRCLnza
+Hv+PDthJzAgYqLTmVO5CdmzTPDHvwjHgfFVlUGfqUQKBgE16f0Euzx3qySIJmMrR
+dNI/y8FSSLgoxWPX7WQIpnXhbNeqqlwGNOi8beSkiRsaL6zJcR3tTPkOfsSeIiCf
+yG73jB3F1L8A5dX2YrKOJOzSxrByzVDnfrukCuxkcW0N31OJ0sFiq9Kjcb/9zyiI
+BQTIxkN91Squn4Y+I3ikyoc3
+-----END PRIVATE KEY-----
+# Certificate from ca-2019.pem
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client.pem
new file mode 100644
index 00000000000..54e73f889fc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client.pem
@@ -0,0 +1,54 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml client.pem
+#
+# General purpose client certificate.
+-----BEGIN CERTIFICATE-----
+MIIDqzCCApMCBAlumm0wDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjc0MFoXDTM5MDkyNzIzMjc0MFowcDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYDVQQDDAZj
+bGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChwxZmhXIUlErr
+Na+gpnBjMigZPXakQLvpWZ02PBbXCfwLAPfEw5B2QH7Y01pxnFiGKY60CiMr1lpA
+IcY60Po4MjzJEVOn1xaVrcaXrD8nCxk8WgndRsd6L7O36etA+zlrndTZLyB1RZza
+xrcuKJ8fELAEFfDdrZOJz1IZ82S81N3U8CX4t8HSKtMBhRTskGxDFGGKrpm9i4ly
+WPcKLxHW5N0C7gBpyFsB6cOs87VFOtL2/iQBaOsF5aRuvNMvuyxtwXy8prJXzh7d
+8N5WaXa8qnR0WW5Kwgs+Snuzi6LD88hGRUv99ZPNFrRYfUPR9nerc+DcsBXklUAD
+1CgKXq+/AgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM
+MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQnVPhN4rWyrA1qxruucxPplu6V5jANBgkq
+hkiG9w0BAQsFAAOCAQEAH4pD8iA1BgpLEIfKYzBA0Qv/ZgCRNtvMz4bdY0rIUjAJ
+0QGOr4Bj0RFj3CJgrhgdXyb07ZcpzHMHzMjwQxPV+1YlxzUJeH6VMflMuemh0oL6
+QZ3YO7eUs174vnAG1ofQ41g5lOJoI3d8dVM6jeYQiJ4W0WHRXUMwJ9EasRWmcx+0
+McPZlJx/ScJRghGrVpKfdxwlq2GOmC5ALW3zFDBkZGanVxSSFlyxfczBms9ZmqTv
+wk+Jt4yoGSnK3eEDR37BBiKGMTUjIodjcOaPUxCsUOITfa6/cBb83Ot/XKtSLAwd
+J/7CxGmyBzwkTSoxgrVVDBriClB7P3rsRDrcvTkVgQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQChwxZmhXIUlErr
+Na+gpnBjMigZPXakQLvpWZ02PBbXCfwLAPfEw5B2QH7Y01pxnFiGKY60CiMr1lpA
+IcY60Po4MjzJEVOn1xaVrcaXrD8nCxk8WgndRsd6L7O36etA+zlrndTZLyB1RZza
+xrcuKJ8fELAEFfDdrZOJz1IZ82S81N3U8CX4t8HSKtMBhRTskGxDFGGKrpm9i4ly
+WPcKLxHW5N0C7gBpyFsB6cOs87VFOtL2/iQBaOsF5aRuvNMvuyxtwXy8prJXzh7d
+8N5WaXa8qnR0WW5Kwgs+Snuzi6LD88hGRUv99ZPNFrRYfUPR9nerc+DcsBXklUAD
+1CgKXq+/AgMBAAECggEBAJNaNeaP+tp6N0DJd8G8u7Xws4xCa/e1rDcl1crYs+mR
+A9I2QH0xtD+0Apia9tF1HUKhSogAkAdXzICNA5kCUlysjqiOKwlCKiWQ1c3GLwTu
+3D8YudGirwVDvM90u0NHVggNDx4ECuoozniP+b+Ha2ON/PjLz8zvV+16OIzBJWvI
+fUkeuyHWsYrh5wNdjW9pT0+N85q9pzuGGthoshR4d5z2bKhm9MKA8mz1p3DMS7a3
+F2AiJPWkvzUksZ/h/WXOk19DhmG7lHkNEzfm/RsUjfswx0Eoz3gmNkH7oS3yYNJD
+yDHt6553zaP0UA+mJzaHqaw+JN1Vxdva+IYNLs+ekLkCgYEA1kjar3m7RiGXu8ZL
+lew3Hhwp0W4PjhfohdLAhyLAbL+8fQl7Blz21wz9/E77JpFQUmpfWhD9V39gvpk9
+X3/dhAX2dbZFphWEPNReF5oWDDiQzZLnfv3UqKQ3/Q58hKhujINIIvPbqyhDz4AW
+zhLKN/JLmNo5mquWOGRvvAcoweMCgYEAwUC0xrZqf8G6y6JGk24u3coY9dmkohNZ
+L/GfKK059b/Px0k05WaIXRxKWlRhIHQjhqVoctjd061WDgHoa8jkS5ARbqoxJeZI
+izx11MDfbdykv/rIm+mpXl1B/WjQ+oV8wg5J8Sz5o8bXlcr7SlWoj/bOFDgfUsG2
+cTBdLx8KEXUCgYEAw8CILfcZkmaLs1jhodLJQzNdLFnL0fWbT+0IPisL+AxsBxmL
+DnQMdsYCm+QW4NOsua088A1HMhBF87gPbddkKAA+dqgNFjzfsaYbUppLe9yMw49o
+9QSyqWBjWURLkfBAFRK06mE1EjVVRRBxRM27d8JbNwZbyyTmtRtjSzSLEhsCgYEA
+qMTrm0afh2cWzNOxMvvcrhDa74nc/zZKg1ZOGuX9YiLbQD0sltKOcFJOlMhv8jm6
+9NHrf7DpOP1908nSwp600VDGCJFVTrXn0MFCXkT6WyWAM7C+FXtgUGq9QKOTLdX5
++DcmWBthAKicidK01lwsU13E51+D63AE9qC4IHjJACkCgYBlUwyanKuhhmKMJo6b
+PBhpUAruP9LRIzoYYlGZcZhaJXKyfrzc0pMM0Xmxk0avG6yfvttu4jndeK/dPjwY
+Fbp6+x+iWUykgPbDMMij1WlC7p/RijenN+vQlHzhB+Zyl8jMjpANHnF2PunaaNa2
+K0ZXGPQcmZrvdOPG7+q9wMMVxg==
+-----END PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..03db67deb50
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/client_revoked.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBAjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB4MRcwFQYDVQQD
+Ew5jbGllbnRfcmV2b2tlZDETMBEGA1UECxMKS2VybmVsVXNlcjEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+lJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSKyZMGCcqlYVQmqT/J
+Fnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505HaWv7b+M3qksRHDLpw
+/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/6vcUkg/aU/50MRUN
+qGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4lQjrCpR36fkr5a+vI
+UbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNFvGDOCNBKZK5ZxLZ3
+gGFcR6kL6u11y4zoLrZ6xwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQB8WQMn/cjh
+9qFtr7JL4VPIz/+96QaBmkHxMqiYL/iMg5Vko3GllLc1mgfWQfaWOvyRJClKj395
+595L2u8wBKon3DXUPAkinc6+VOwDWsxFLNtWl+jhigat5UDzGm8ZKFhl0WwNhqzZ
+dlNPrh2LJZzPFfimfGyVkhPHYYdELvn+bnEMT8ae1jw2yQEeVFzHe7ZdlV5nMOE7
+Gx6ZZhYlS+jgpIxez5aiKqit/0azq5GGkpCv2H8/EXxkR4gLZGYnIqGuZP3r34NY
+Lkh5J3Qnpyhdopa/34yOCa8mY1wW7vEro0fb/Dh21bpyEOz6tBk3C1QRaGD+XQOM
+cedxtUjYmWqn
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSK
+yZMGCcqlYVQmqT/JFnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505Ha
+Wv7b+M3qksRHDLpw/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/
+6vcUkg/aU/50MRUNqGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4l
+QjrCpR36fkr5a+vIUbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNF
+vGDOCNBKZK5ZxLZ3gGFcR6kL6u11y4zoLrZ6xwIDAQABAoIBAFlu0T3q18Iu0VlR
+n5GEYMgvSuM4NAVVKo8wwwYMwu1xuvXb+NMLyuyFqzaCQKpHmywOOnfhCC/KkxX8
+Ho87kTbTDKhuXZyOHx0cA1zKCDSlGdK8yt9M1vJMa0pdGi2M34b+uOQ35IVsOocH
+4KWayIH7g52V2xZ2bpOSSnpm0uCPZSBTgClCgTUYepOT2wbLn/8V0NtVpZhDsBqg
+fORuEHkiurrbLa8yjQsvbR+hsR/XbGhre8sTQapj4EITXvkEuOL/vwbRebhOFHgh
+8sipsXZ9CMaJkBpVoLZTxTKQID/9006cczJK2MGKFhn6mvP6AeFuJAM3xqLGZTc4
+xxpfJyECgYEA0+iKxy5r1WUpBHR8jTh7WjLc6r5MFJQlGgLPjdQW6gCIe/PZc+b9
+x5vDp27EQ1cAEePEu0glQ/yk19yfxbxrqHsRjRrgwoiYTXjGI5zZSjXKArHyEgBj
+XOyo5leO5XMFnk2AShPlh+/RhAW3NhxcWkBEAsCD6QyC3BPvP6aaAXkCgYEAs4WH
+dTuweTdnyquHQm59ijatvBeP8h4tBozSupflQjB9WxJeW5uEa8lNQ3lSz1F4TV3M
+xvGdDSqwftLRS2mWGho/1jaCeAzjsiUQ2WUHChxprt0+QU7XkJbaBY9eF+6THZFw
+sDG688TiolxqoD8OYi8EtxmIvbQhXHmXnrk3jj8CgYBSi74rkrisuqg8tQejl0Ht
+w+xsgM5wIblGJZwmOlzmsGh6KGYnkO6Ap/uSKELJnIVJcrk63wKtNigccjPGufwR
++EbA+ZxeCwmQ/B/q1XmLP+K+JAUQ4BfUpdexSqA+XwzsOnJj6NY7mr65t+RDbs7G
+1Uvo6oc37Ai5pAZJfCN3uQKBgQAJr5qvaJkM8UBYXwjdPLjpTCnzjBHoLlifkdmM
+18U23QbmcwdESg/LAQF6MoGVTf//rJ/v2/ltTHBZZ2aDex7uKZxoImjHsWpXokhW
+cmz+zqmlFarWOzrGQl1hD2s0P1sQrVg3KXe8z1KrD/Fw0/Yitga7GlWWZrGmG6li
+lvu4YQKBgQANODQYEaz739IoPNnMfTpTqAoQIOR4PNdMfCXSQrCB8i0Hh4z48E4F
+DEAd1xIYyxI8pu7r52dQlBk7yrILOTG0gmgLJd5xKdtCTrasYAICI3hsRLtP8dVA
+8WeykXY4Wf1bYQ+VzKVImkwL/SBm2ik5woyxCzT8JSjyoAwRrQp9Vw==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/cluster_cert.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/cluster_cert.pem
new file mode 100644
index 00000000000..a8623ab67ef
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/cluster_cert.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkagAwIBAgIBBDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBxMRQwEgYDVQQD
+EwtjbHVzdGVydGVzdDEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCX42ZTwADG
+sEkS7ijfADlDQaJpbdgrnQKa5ssMQK3oRGSqXfTp0ThsJiVBbYZ8ZZRpPMgJdowa
+pFCGHQJh6VOdKelR0f/uNVpBGVz1yD4E4AtkA6UYcIJq6ywcj+W7Pli1Ed8VUN3Q
+tBU+HvHiEdMj74kLJb4ID1cP3gehvRv/0szkN8/ODFKCgYb1619BdFb9gRn8eily
+Wcg1m1gXz2xSfqRZkFEcEYet3BeOEGZBhaufJFzinvQjocH+kWFKlZf0+2DEFFbH
+NRqmabMmqMBUke629EUn8a7PBWBYNLld9afoNHwNY68wpONf5IqR2mNar5bVz8/d
+4g7BuVNvEFdJAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAA3U2O+cE/ZS8SDBw/sr
+BVFf0uaoME7+XX2jdTi4RUpWPfQ6uTkhKnXKzTzGrQtKwA96slGp4c3mxGBaAbC5
+IuTS97mLCju9NFvJVtazIajO4eNlG6dJSk0pQzjc0RAeLYksX/9NRNKZ+lQ5QVS2
+NVLce70QZBIvujjVJZ5hqDdjPV0JGOOUzNGyyUhzgY7s9MQagNnBSu5HO4CK1onc
+goOkizulq/5WF+JtqW8VKKx+/CH6SnTkS4b3qbjgKRmHZcOshH/d4KqhoLya7sfH
+pedmm7WgO9p8umXXqNj+04ehuPKTnD8tLMhj+GbJ9eIChPCBf1XnIzOXYep+fq9j
+n/g=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAl+NmU8AAxrBJEu4o3wA5Q0GiaW3YK50CmubLDECt6ERkql30
+6dE4bCYlQW2GfGWUaTzICXaMGqRQhh0CYelTnSnpUdH/7jVaQRlc9cg+BOALZAOl
+GHCCaussHI/luz5YtRHfFVDd0LQVPh7x4hHTI++JCyW+CA9XD94Hob0b/9LM5DfP
+zgxSgoGG9etfQXRW/YEZ/HopclnINZtYF89sUn6kWZBRHBGHrdwXjhBmQYWrnyRc
+4p70I6HB/pFhSpWX9PtgxBRWxzUapmmzJqjAVJHutvRFJ/GuzwVgWDS5XfWn6DR8
+DWOvMKTjX+SKkdpjWq+W1c/P3eIOwblTbxBXSQIDAQABAoIBAHhjNFMDZ1oUlgbc
+ICcI/VoyprFb8DA5ZbwzXBMdHPpxYzyp9hpxy3/lCDiAwEzPEsAK/h6KCOiA/pYe
+XioPD0gN1TIV+f3r6dqZaNYi3g1tK3odbXkejDCEsFT/NT6hXxw9yw0RKI9ofUHc
+synVqP3duUjNpH6s8fvQp0nqI0wzoNm1kklpTWVjZmbtSZF9m/xfv7NGwQEYUL2V
+f5YvX6aHPVDtUXAqyPBgv6SGuogSSjwRTsNTef3aY6Se5MlP3YIfRqdad8+ORkKu
+WSrO+GjQccV4sztD8Sn3LR7qe6Lmid4yopHSS4EFq0Sc8LznTeflWcRAsBLezRp5
+xZB/blECgYEA8yrEzFA247AOXbhL1CdqMyPs523oy5+dmByyovjYjEhjUCRlAa9D
+ApvID4TfAkA4n0rUdICCtwbZlFrBZbn6rXNvJ362ufZjvaFIucQm90YkG1J6Ldek
+8ohJfLyyLLWzVHJIS7WxFqqsGmDhYUTErFbJZjI8tNSglrc81jUWT7UCgYEAn+dw
+ICyc09f6+xm3nFZIOq2Gtpw8lrOJlwZugn1AqY2D5Ko2gq1Fx2oZWpVaBivjH3gU
+ONlnPuealE0RJHvCm/+axy7Rcj65IwTrN5V+j6rg1tuEdi70PvNKmN6XQqRvEjOX
+HOh3gQYP6EFAoVINZZqUkwJzqpv4tnOSpEHXncUCgYB3+Z8Vq3IZjtDXvslzCGtm
+hhAp81mLtdocpfQhYqP9Ou39KafIV/+49sGTnpwlUShet53xSUK1KSULBGgtV8Bt
++ela1DM1t3Joqn3mYfhTwoCoFl5/5cjVfRa8+6DxXEj5nlU7PY79PwIhFbG9ux9K
+ZJuD17+J/Oqq0gerLJAwjQKBgAS4AbkRV/dwcjmiwqZcbXk90bHl3mvcFH1edTho
+ldXrFS9UTpOApYSC/wiLS8LO3L76/i3HTKKwlwE1XQIknNOZsWmbWhby/uenp4FW
+agu3UTdF9xy9uft5loP4XaJb0+NHnnf97DjkgueptUyNbVPIQgYsllk8jRRlSLiM
+MN65AoGAUPLlh8ok/iNirO5YKqc5/3FKA1o1V1KSTHYVUK+Y+vuVJxQZeO3LMybe
+7AJ1cLHEWc8V4B27e6g33rfGGAW+/+RJ7/uHxuYCuKhstbq/x+rf9i4nl93emlMV
+PC3yuZsCmpk9Uypzi2+PT10yVgXkXRYtLpuUpoABWRzVXGnEsXo=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..55f93a01d1c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,217 @@
+// Merge the two options objects. Used as a helper when we want to compare options despite the
+// extra fields our test framework adds to them. Anything set in the second options object
+// overrides the first. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ var attrname;
+ for (attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ } else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ // Already handled above
+ } else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
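+// For illustration only (hypothetical values, not used by the tests below):
+//   mergeOptions({net: {port: 1}, storage: {}}, {net: {bindIp: "x"}})
+// returns {net: {port: 1, bindIp: "x"}, storage: {}}: nested objects are merged
+// recursively, and on a conflict the value from the second object wins.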
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Create and authenticate a high-privilege user in case mongod is running with authorization.
+ // Try/catch is necessary in case this is being run against an uninitialized replica set,
+ // e.g. by a test such as repl_options.js.
+ var ex;
+ try {
+ mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
+ mongod.getDB("admin").auth("root", "pass");
+ } catch (err) {
+ ex = err;
+ }
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ mongod.getDB("admin").logout();
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({configdb: baseMongod.host});
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_auth.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_auth.ini
new file mode 100644
index 00000000000..c1193be1b03
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_auth.ini
@@ -0,0 +1 @@
+auth=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_dur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_dur.ini
new file mode 100644
index 00000000000..8f83f3ae5a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_dur.ini
@@ -0,0 +1 @@
+dur=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini
new file mode 100644
index 00000000000..fc839a98a76
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini
@@ -0,0 +1 @@
+httpinterface=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini
new file mode 100644
index 00000000000..a091421022d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini
@@ -0,0 +1 @@
+ipv6=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_journal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_journal.ini
new file mode 100644
index 00000000000..d0010a86906
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_journal.ini
@@ -0,0 +1 @@
+journal=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini
new file mode 100644
index 00000000000..82847f50b2b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini
@@ -0,0 +1 @@
+jsonp=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini
new file mode 100644
index 00000000000..f21b50f9513
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini
@@ -0,0 +1 @@
+moveParanoia=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini
new file mode 100644
index 00000000000..a65f909baf3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini
@@ -0,0 +1 @@
+noauth=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini
new file mode 100644
index 00000000000..b490f9038dd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini
@@ -0,0 +1 @@
+noAutoSplit=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini
new file mode 100644
index 00000000000..b0c73a48b30
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini
@@ -0,0 +1 @@
+nodur=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini
new file mode 100644
index 00000000000..52c4958da6e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini
@@ -0,0 +1 @@
+nohttpinterface=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini
new file mode 100644
index 00000000000..79e428c492f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini
@@ -0,0 +1 @@
+noIndexBuildRetry=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini
new file mode 100644
index 00000000000..17172363d25
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini
new file mode 100644
index 00000000000..4696304134f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini
@@ -0,0 +1 @@
+noMoveParanoia=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini
new file mode 100644
index 00000000000..471e83c3172
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini
@@ -0,0 +1 @@
+noobjcheck=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini
new file mode 100644
index 00000000000..08c78be3507
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini
@@ -0,0 +1 @@
+noprealloc=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini
new file mode 100644
index 00000000000..66da9f08391
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini
@@ -0,0 +1 @@
+nounixsocket=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini
new file mode 100644
index 00000000000..bd19d026bbf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini
@@ -0,0 +1 @@
+objcheck=false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini
new file mode 100644
index 00000000000..43495fbd0bd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini
@@ -0,0 +1 @@
+dur=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini
new file mode 100644
index 00000000000..f750ac2e185
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini
@@ -0,0 +1 @@
+journal=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini
new file mode 100644
index 00000000000..f1046df16a9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini
@@ -0,0 +1 @@
+nodur=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini
new file mode 100644
index 00000000000..737e5c28029
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl.pem
new file mode 100644
index 00000000000..275c9e2d91c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:56:28 2014 GMT
+ Next Update: Aug 18 13:56:28 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 48:1b:0b:b1:89:f5:6f:af:3c:dd:2a:a0:e5:55:04:80:16:b4:
+ 23:98:39:bb:9f:16:c9:25:73:72:c6:a6:73:21:1d:1a:b6:99:
+ fc:47:5e:bc:af:64:29:02:9c:a5:db:15:8a:65:48:3c:4f:a6:
+ cd:35:47:aa:c6:c0:39:f5:a6:88:8f:1b:6c:26:61:4e:10:d7:
+ e2:b0:20:3a:64:92:c1:d3:2a:11:3e:03:e2:50:fd:4e:3c:de:
+ e2:e5:78:dc:8e:07:a5:69:55:13:2b:8f:ae:21:00:42:85:ff:
+ b6:b1:2b:69:08:40:5a:25:8c:fe:57:7f:b1:06:b0:72:ff:61:
+ de:21:59:05:a8:1b:9e:c7:8a:08:ab:f5:bc:51:b3:36:68:0f:
+ 54:65:3c:8d:b7:80:d0:27:01:3e:43:97:89:19:89:0e:c5:01:
+ 2c:55:9f:b6:e4:c8:0b:35:f8:52:45:d3:b4:09:ce:df:73:98:
+ f5:4c:e4:5a:06:ac:63:4c:f8:4d:9c:af:88:fc:19:f7:77:ea:
+ ee:56:18:49:16:ce:62:66:d1:1b:8d:66:33:b5:dc:b1:25:b3:
+ 6c:81:e9:d0:8a:1d:83:61:49:0e:d9:94:6a:46:80:41:d6:b6:
+ 59:a9:30:55:3d:5b:d3:5b:f1:37:ec:2b:76:d0:3a:ac:b2:c8:
+ 7c:77:04:78
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNTYyOFoXDTI0MDgxODEzNTYyOFqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEASBsLsYn1b6883Sqg5VUEgBa0I5g5u58WySVzcsam
+cyEdGraZ/EdevK9kKQKcpdsVimVIPE+mzTVHqsbAOfWmiI8bbCZhThDX4rAgOmSS
+wdMqET4D4lD9Tjze4uV43I4HpWlVEyuPriEAQoX/trEraQhAWiWM/ld/sQawcv9h
+3iFZBagbnseKCKv1vFGzNmgPVGU8jbeA0CcBPkOXiRmJDsUBLFWftuTICzX4UkXT
+tAnO33OY9UzkWgasY0z4TZyviPwZ93fq7lYYSRbOYmbRG41mM7XcsSWzbIHp0Iod
+g2FJDtmUakaAQda2WakwVT1b01vxN+wrdtA6rLLIfHcEeA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..0b99d56936e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,41 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:43:27 2014 GMT
+ Next Update: Aug 18 13:43:27 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+Revoked Certificates:
+ Serial Number: 02
+ Revocation Date: Aug 21 13:43:27 2014 GMT
+ Signature Algorithm: sha256WithRSAEncryption
+ 24:86:73:8d:7f:55:15:d0:d6:8a:47:53:cf:97:f7:e5:3d:0b:
+ 4a:ea:fb:02:6a:2e:79:c6:b1:38:b2:ac:f0:c0:64:47:b0:3e:
+ ad:4e:2e:94:e6:64:ed:79:34:bd:74:c0:d4:3d:b9:a1:bb:38:
+ 89:5c:02:6a:ad:6b:dc:3b:64:34:6a:2d:4c:90:36:82:95:0c:
+ 19:88:e2:a3:bf:8e:1b:56:98:37:32:87:ed:f0:bd:dd:e2:0d:
+ f9:80:dc:f2:a5:b4:ee:d9:bb:83:fe:b8:3a:13:e0:da:fc:04:
+ 77:fb:ce:f9:c5:2a:54:a7:f0:34:09:2a:b2:3d:46:1b:48:e6:
+ e8:16:c7:a1:3c:88:8c:72:cd:cc:53:dc:f8:54:63:1f:b9:8b:
+ ea:2c:e5:26:c5:b4:a4:9f:8b:e1:6c:85:9b:c6:63:6f:2f:ae:
+ 18:c5:6a:23:f0:58:27:85:5c:0f:01:04:da:d2:8b:de:9e:ab:
+ 46:00:22:07:28:e1:ef:46:91:90:06:58:95:05:68:67:58:6e:
+ 67:a8:0b:06:1a:73:d9:04:18:c9:a3:e4:e3:d6:94:a3:e1:5c:
+ e5:08:1b:b3:9d:ab:3e:ea:20:b1:04:e5:90:e1:42:54:b2:58:
+ bb:51:1a:48:87:60:b0:95:4a:2e:ce:a0:4f:8c:17:6d:6b:4c:
+ 37:aa:4d:d7
+-----BEGIN X509 CRL-----
+MIIB5DCBzQIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNDMyN1oXDTI0MDgxODEzNDMyN1owFDASAgECFw0xNDA4MjExMzQz
+MjdaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACSGc41/VRXQ
+1opHU8+X9+U9C0rq+wJqLnnGsTiyrPDAZEewPq1OLpTmZO15NL10wNQ9uaG7OIlc
+Amqta9w7ZDRqLUyQNoKVDBmI4qO/jhtWmDcyh+3wvd3iDfmA3PKltO7Zu4P+uDoT
+4Nr8BHf7zvnFKlSn8DQJKrI9RhtI5ugWx6E8iIxyzcxT3PhUYx+5i+os5SbFtKSf
+i+FshZvGY28vrhjFaiPwWCeFXA8BBNrSi96eq0YAIgco4e9GkZAGWJUFaGdYbmeo
+CwYac9kEGMmj5OPWlKPhXOUIG7Odqz7qILEE5ZDhQlSyWLtRGkiHYLCVSi7OoE+M
+F21rTDeqTdc=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_expired.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..c9b3abb05a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/crl_expired.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Jul 21 19:45:56 2014 GMT
+ Next Update: Jul 21 20:45:56 2014 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:e8:6d:51:fc:0e:66:08:22:b2:4d:fb:da:7a:5f:4d:d1:a0:
+ 80:f0:18:f3:c5:ca:c7:05:6c:70:59:fa:d5:96:68:fa:c7:1d:
+ 7e:fb:53:3b:4a:8f:ed:bb:51:04:e8:fb:db:d7:b8:96:d9:e2:
+ 8d:bb:54:cc:11:60:c8:20:ea:81:28:5f:e1:eb:d6:8c:94:bf:
+ 42:e0:7f:a3:13:0c:76:05:f2:f0:34:98:a3:e8:64:74:4c:cb:
+ bf:39:bb:fa:d5:2d:72:02:d1:fa:56:15:59:12:b7:ff:a3:cc:
+ c9:d6:14:ca:4a:1e:0b:b4:47:cf:58:b0:e5:24:d2:21:71:0d:
+ 2d:09:77:5c:2f:ef:40:f8:74:90:03:cc:37:2e:ea:6a:25:59:
+ c0:bf:48:90:00:55:9c:db:bf:1f:f0:7b:b6:5a:90:94:b6:8d:
+ 7c:7d:bb:2d:11:5f:0c:f5:4a:9b:c5:ed:ab:e3:fd:35:c8:76:
+ 3b:2e:41:cb:df:76:b5:f4:e9:05:72:f6:56:7a:fc:34:07:d6:
+ a2:55:eb:7c:58:33:5b:9d:3e:b2:03:89:01:c6:d1:54:75:1a:
+ 5c:73:3f:5e:2e:fd:3b:38:ed:d4:e1:fa:ec:ff:84:f0:55:ee:
+ 83:e0:f0:13:97:e7:f0:55:8c:00:a3:1a:31:e4:31:9e:68:d0:
+ 6d:3e:81:b0
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDcyMTE5NDU1NloXDTE0MDcyMTIwNDU1NlqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEAFOhtUfwOZggisk372npfTdGggPAY88XKxwVscFn6
+1ZZo+scdfvtTO0qP7btRBOj729e4ltnijbtUzBFgyCDqgShf4evWjJS/QuB/oxMM
+dgXy8DSYo+hkdEzLvzm7+tUtcgLR+lYVWRK3/6PMydYUykoeC7RHz1iw5STSIXEN
+LQl3XC/vQPh0kAPMNy7qaiVZwL9IkABVnNu/H/B7tlqQlLaNfH27LRFfDPVKm8Xt
+q+P9Nch2Oy5By992tfTpBXL2Vnr8NAfWolXrfFgzW50+sgOJAcbRVHUaXHM/Xi79
+Ozjt1OH67P+E8FXug+DwE5fn8FWMAKMaMeQxnmjQbT6BsA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/expired.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/expired.pem
new file mode 100644
index 00000000000..e1d2ceb8de8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/expired.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfzCCAmegAwIBAgIBEDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzEwMTYwMDAwWhcNMTQwNzE2MTYwMDAwWjBtMRAwDgYDVQQD
+EwdleHBpcmVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAU
+BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG
+EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPFSQZlHvJpi3dmA
+1X5U1qaUN/O/EQy5IZ5Rw+cfFHWOZ84EsLZxehWyqDZRH49Rg06xSYdO2WZOopP8
+OnUVCLGL819K83ikZ5sCbvB/gKCSCenwveEN992gJfs70HaZfiJNC7/cFigSb5Jg
+5G77E1/Uml4hIThfYG2NbCsTuP/P4JLwuzCkfgEUWRbCioMPEpIpxQw2LCx5DCy6
+Llhct0Hp14N9dZ4nA1h1621wOckgGJHw9DXdt9rGzulY1UgOOPczyqT08CdpaVxK
+VzrJCcUxfUjhO4ukHz+LBFQY+ZEm+tVboDbinbiHxY24urP46/u+BwRvBvjOovJi
+NVUh5GsCAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEw
+DQYJKoZIhvcNAQEFBQADggEBAG3rRSFCSG3hilGK9SMtTpFnrquJNlL+yG0TP8VG
+1qVt1JGaDJ8YUc5HXXtKBeLnRYii7EUx1wZIKn78RHRdITo5OJvlmcwwh0bt+/eK
+u9XFgR3z35w5UPr/YktgoX39SOzAZUoorgNw500pfxfneqCZtcRufVvjtk8TUdlN
+lcd2HfIxtUHWJeTcVM18g0JdHMYdMBXDKuXOW9VWLIBC2G6nAL/8SZJtUaDllPb4
+NisuIGjfjGgNxMpEXn+sQjFTupAoJru21OtAgERWFJhKQ0hbO0kucEPKEfxHDBVG
+dKSRIl6b0XSDLfxEXPv5ZhdrK4KEw1dYYXySvIVXtn0Ys38=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA8VJBmUe8mmLd2YDVflTWppQ3878RDLkhnlHD5x8UdY5nzgSw
+tnF6FbKoNlEfj1GDTrFJh07ZZk6ik/w6dRUIsYvzX0rzeKRnmwJu8H+AoJIJ6fC9
+4Q333aAl+zvQdpl+Ik0Lv9wWKBJvkmDkbvsTX9SaXiEhOF9gbY1sKxO4/8/gkvC7
+MKR+ARRZFsKKgw8SkinFDDYsLHkMLLouWFy3QenXg311nicDWHXrbXA5ySAYkfD0
+Nd232sbO6VjVSA449zPKpPTwJ2lpXEpXOskJxTF9SOE7i6QfP4sEVBj5kSb61Vug
+NuKduIfFjbi6s/jr+74HBG8G+M6i8mI1VSHkawIDAQABAoIBAGAO1QvVkU6HAjX8
+4X6a+KJwJ2F/8aJ14trpQyixp2wv1kQce9bzjpwqdGjCm+RplvHxAgq5KTJfJLnx
+UbefOsmpoqOQ6x9fmdoK+uwCZMoFt6qGaJ63960hfVzm71D2Qk4XCxFA4xTqWb0T
+knpWuNyRfSzw1Q9ib7jL7X2sKRyx9ZP+1a41ia/Ko6iYPUUnRb1Ewo10alYVWVIE
+upeIlWqv+1DGfda9f34pGVh3ldIDh1LHqaAZhdn6sKtcgIUGcWatZRmQiA5kSflP
+VBpOI2c2tkQv0j5cPGwD7GGaJ2aKayHG0EwnoNmxCeR0Ay3MO0vBAsxn7Wy6yqrS
+EfkYhFkCgYEA/OA2AHFIH7mE0nrMwegXrEy7BZUgLRCRFWTjxwnCKFQj2Uo2dtYD
+2QQKuQWeiP+LD2nHj4n1KXuSJiB1GtmEF3JkYV4Wd7mPWEVNDHa0G8ZndquPK40s
+YSjh9u0KesUegncBFfIiwzxsk9724iaXq3aXOexc0btQB2xltRzj6/0CgYEA9E2A
+QU6pnCOzGDyOV7+TFr0ha7TXaMOb5aIVz6tJ7r5Nb7oZP9T9UCdUnw2Tls5Ce5tI
+J23O7JqwT4CudnWnk5ZtVtGBYA23mUryrgf/Utfg08hU2uRyq9LOxVaVqfV/AipN
+62GmfuxkK4PatOcAOhKqmS/zGfZqIg7V6rtX2ocCgYEAlY1ogpR8ij6mvfBgPmGr
+9nues+uBDwXYOCXlzCYKTN2OIgkQ8vEZb3RDfy9CllVDgccWfd6iPnlVcvUJLOrt
+gwxlL2x8ryvwCc1ahv+A/1g0gmtuDdy9HW0XTnjcFMWViKUm4DrGsl5+/GkF67PV
+SVOmllwifOthpjJGaHmAlmUCgYB6EFMZzlzud+PfIzqX20952Avfzd6nKL03EjJF
+rbbmA82bGmfNPfVHXC9qvRTWD76mFeMKWFJAY9XeE1SYOZb+JfYBn/I9dP0cKZdx
+nutSkCx0hK7pI6Wr9kt7zBRBdDj+cva1ufe/iQtPtrTLGHRDj9oPaibT/Qvwcmst
+umdd9wKBgQDM7j6Rh7v8AeLy2bw73Qtk0ORaHqRBHSQw87srOLwtfQzE92zSGMj+
+FVt/BdPgzyaddegKvJ9AFCPAxbA8Glnmc89FO7pcXn9Wcy+ZoZIF6YwgUPhPCp/4
+r9bKuXuQiutFbKyes/5PTXqbJ/7xKRZIpQCvxg2syrW3hxx8LIx/kQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/extended_assert.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/extended_assert.js
new file mode 100644
index 00000000000..e020af1c642
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/extended_assert.js
@@ -0,0 +1,65 @@
+// Exports 'extendedAssert', which includes all built-in assertions plus:
+// - a new extendedAssert.strContains(needle, haystack, msg)
+// - a .soon variant of eq, neq, contains, gt, lt, gte, lte, and strContains,
+//   e.g. .eq.soon(expected, getActualFunc, msg[, timeout, interval])
+// These produce more descriptive assertion error messages than the built-in
+// assert.soon provides.
+
+var extendedAssert;
+(function() {
+ if (typeof extendedAssert !== 'undefined') {
+ return;
+ }
+
+ // Make a copy of the assert object
+ extendedAssert = assert.bind(this);
+ for (var key in assert) {
+ if (assert.hasOwnProperty(key)) {
+ extendedAssert[key] = assert[key];
+ }
+ }
+
+ extendedAssert.strContains = function(needle, haystack, msg) {
+ if (haystack.indexOf(needle) === -1) {
+ doassert('"' + haystack + '" does not contain "' + needle + '" : ' + msg);
+ }
+ };
+
+ var EX_ASSERT_DONT_PRINT = '**extended_assert.js--do not print this error message**';
+ var builtin_doassert = doassert;
+ var muteable_doassert = function(msg, obj) {
+ if (msg.indexOf(EX_ASSERT_DONT_PRINT) !== -1) {
+ throw Error(msg);
+ }
+ builtin_doassert(msg, obj);
+ };
+
+ ['eq', 'neq', 'contains', 'gt', 'lt', 'gte', 'lte', 'strContains']
+ .forEach(function (name) {
+ var assertFunc = extendedAssert[name];
+ var newAssertFunc = assertFunc.bind(this);
+ newAssertFunc.soon = function(expected, actualFunc, msg, timeout, interval) {
+ try {
+ doassert = muteable_doassert;
+ extendedAssert.soon(function() {
+ try {
+ assertFunc(expected, actualFunc(), EX_ASSERT_DONT_PRINT);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }, EX_ASSERT_DONT_PRINT, timeout, interval);
+ doassert = builtin_doassert;
+ } catch (e) {
+ doassert = builtin_doassert;
+ // Make it fail
+ assertFunc(expected, actualFunc(), msg);
+ }
+ };
+ extendedAssert[name] = newAssertFunc;
+ });
+}());
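+// Illustrative usage (collection and message names here are hypothetical):
+//   var assert = extendedAssert;
+//   assert.strContains("shard", rawMongoProgramOutput(), "expected shard in output");
+//   assert.eq.soon(3, function() { return db.coll.count(); }, "expected 3 docs");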
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/fts.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/fts.js
new file mode 100644
index 00000000000..0da80d5d3ae
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/fts.js
@@ -0,0 +1,25 @@
+function getIDS(commandResult) {
+ if (!(commandResult && commandResult.results)) {
+ return [];
+ }
+
+ return commandResult.results.map(function(z) {
+ return z.obj._id;
+ });
+}
+
+function queryIDS(coll, search, filter, extra) {
+ var cmd = {search: search};
+ if (filter) {
+ cmd.filter = filter;
+ }
+ if (extra) {
+ Object.extend(cmd, extra);
+ }
+ var lastCommandResult = coll.runCommand("text", cmd);
+
+ return getIDS(lastCommandResult);
+}
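+// Example (a sketch; assumes a text index on the collection and the legacy "text" command):
+//   var ids = queryIDS(db.articles, "coffee", {category: "food"});
+//   printjson(ids);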
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/geo_near_random.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..7809aa77adc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/geo_near_random.js
@@ -0,0 +1,106 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+};
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds) {
+ if (!indexBounds) {
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [(Random.rand() * (range - eps) + eps) + indexBounds.min, (Random.rand() * (range - eps) + eps) + indexBounds.min];
+
+};
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPts already called");
+ this.nPts = nPts;
+
+ var bulk = this.t.initializeUnorderedBulkOp();
+ for (var i=0; i<nPts; i++) {
+ bulk.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ }
+ assert.writeOK(bulk.execute());
+
+ if (!indexBounds) {
+ this.t.ensureIndex({loc: '2d'});
+ } else {
+ this.t.ensureIndex({loc: '2d'}, indexBounds);
+ }
+};
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++) {
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0];
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1];
+ var dS = short[i].obj ? short[i].dis : 1;
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0];
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1];
+ var dL = long[i].obj ? long[i].dis : 1;
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+};
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPts not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear: this.t.getName(), near: pt, num: 1, spherical: opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++) {
+ // print(i); // uncomment to watch status
+ cmd.num = i;
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded) {
+ last = last.map(function(x) {
+ return x.obj;
+ });
+
+ var query = {loc: {}};
+ query.loc[opts.sphere ? '$nearSphere' : '$near'] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+};
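+// Typical driver for this harness (a sketch; the test name is arbitrary):
+//   var test = new GeoNearRandomTest("geo_near_random_example");
+//   test.insertPts(50);
+//   test.testPt([0, 0]);
+//   test.testPt(test.mkPt());
+//   test.testPt(test.mkPt(0.8), {sphere: 1});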
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/host_ipaddr.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/host_ipaddr.js
new file mode 100644
index 00000000000..f5412da1563
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/host_ipaddr.js
@@ -0,0 +1,41 @@
+// Returns the non-localhost IP address of the host running the mongo shell process.
+function get_ipaddr() {
+ // set temp path, if it exists
+ var path = "";
+ try {
+ path = TestData.tmpPath;
+ if (typeof path === "undefined") {
+ path = "";
+ } else if (path.slice(-1) !== "/") {
+ // Terminate path with / if defined
+ path += "/";
+ }
+ } catch (err) {
+ // no testdata
+ }
+
+ var ipFile = path+"ipaddr.log";
+ var windowsCmd = "ipconfig > "+ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipAddr = null;
+ var hostType = null;
+
+ try {
+ hostType = getBuildInfo().sysInfo.split(' ')[0];
+
+ // os-specific methods
+ if (hostType === "windows") {
+ runProgram('cmd.exe', '/c', windowsCmd);
+ ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
+ } else {
+ runProgram('bash', '-c', unixCmd);
+ ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
+ }
+ } finally {
+ removeFile(ipFile);
+ }
+ return ipAddr;
+}
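+// Example (a sketch; "somePort" stands in for a real mongod port):
+//   var conn = new Mongo(get_ipaddr() + ":" + somePort);
+//   assert.commandWorked(conn.getDB("admin").runCommand({ping: 1}));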
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key1 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key2 b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e6aca6a217d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDgTCCAmmgAwIBAgIBBTANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBvMRIwEAYDVQQD
+EwkxMjcuMC4wLjExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEW
+MBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNV
+BAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiqQNGgQggL8S
+LlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9wd0VNuD6+Ycg1mBbopO+M/K/ZWv8c
+7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9kVGRb2bNAfV2bC5/UnO1ulQdHoIB
+p3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8rwNNFvooMRg8yq8tq0qBkVhh85kct
+HHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqvI/Y5eIeZLhdIzAv37kolr8AuyqIR
+qcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZxoZN9Jv7x5LyiA+ijtQ+5aI/kMPqG
+nox+/bNFCQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAu
+MTANBgkqhkiG9w0BAQUFAAOCAQEAVJJNuUVzMRaft17NH6AzMSTiJxMFWoafmYgx
+jZnzA42XDPoPROuN7Bst6WVYDNpPb1AhPDco9qDylSZl0d341nHAuZNc84fD0omN
+Mbqieu8WseRQ300cbnS8p11c9aYpO/fNQ5iaYhGsRT7pnLs9MIgR468KVjY2xt49
+V0rshG6RxZj83KKuJd0T4X+5UeYz4B677y+SR0aoK2I2Sh+cffrMX2LotHc2I+JI
+Y9SDLvQT7chD9GzaWz634kmy3EEY0LreMm6AxhMOsr0lbZx5O8wLTScSjKARJ6OH
+nPxM1gYT07mkNmfyEnl1ChAN0MPgcLHQqEfe7x7ZQSbAv2gWfA==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAiqQNGgQggL8SLlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9w
+d0VNuD6+Ycg1mBbopO+M/K/ZWv8c7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9
+kVGRb2bNAfV2bC5/UnO1ulQdHoIBp3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8r
+wNNFvooMRg8yq8tq0qBkVhh85kctHHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqv
+I/Y5eIeZLhdIzAv37kolr8AuyqIRqcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZx
+oZN9Jv7x5LyiA+ijtQ+5aI/kMPqGnox+/bNFCQIDAQABAoIBAQAMiUT+Az2FJsHY
+G1Trf7Ba5UiS+/FDNNn7cJX++/lZQaOj9BSRVFzwuguw/8+Izxl+QIL5HlWDGupc
+tJICWwoWIuVl2S7RI6NPlhcEJF7hgzwUElnOWBfUgPEsqitpINM2e2wFSzHO3maT
+5AoO0zgUYK+8n9d74KT9CFcLqWvyS3iksK/FXfCZt0T1EoJ4LsDjeCTfVKqrku2U
++fCnZZYNkrgUI7Hku94EJfOh462V4KQAUGsvllwb1lfmR5NR86G6VX6oyMGctL5e
+1M6XQv+JQGEmAe6uULtCUGh32fzwJ9Un3j2GXOHT0LWrVc5iLuXwwzQvCGaMYtKm
+FAIDpPxhAoGBAMtwzpRyhf2op/REzZn+0aV5FWKjeq69Yxd62RaOf2EetcPwvUOs
+yQXcP0KZv15VWU/XhZUmTkPf52f0YHV/b1Sm6wUOiMNQ4XpnRj2THf0N7RS4idMm
+VwtMf1pxqttxQVKPpOvPEiTyIh2Nx/juyfD4CWkOVNTvOCd1w+av6ukNAoGBAK51
+gIXDuwJ2e5h3IJyewN/HOZqlgPKyMjnACaeXQ5wPJSrz4+UkJkuXT2dYKhv6u7K/
+GtucTdvBIJeq61+LjjkYk7OVDzoqP/uWU7p1y7gU9LZq+7tgq7r8cgeaC3IBQe7X
+jdFPEy1+zAEBh6MfFjnLZ2Kop9qbH3cNih/g9pTtAoGBAJ8dmdUtRXNByCsa7Rv2
+243qiDlf14J4CdrBcK1dwm75j/yye7VEnO2Cd8/lZHGpm3MBBC/FiA06QElkL1V2
+2GKDMun/liP9TH1p7NwYBqp3i+ha9SE6qXXi3PCmWpXLnOWwB7OPf4d6AgjPbYpb
+aYKY3PNYDC2G9IqYZyI0kSy5AoGBAJ5Fe5PfPom9c+OeL7fnTpO16kyiWZnUkDxU
+PG4OjQfHtbCCEv6PDS8G1sKq+Yjor+A5/+O8qeX0D92I8oB720txQI5rbKUYL3PP
+raY7t9YJLPlRlY8o5KN+4vSCjF+hRG+qnr6FPqDHp8xB1wvl6AQGxIR8/csVcDZR
+0j2ZmhsBAoGAO1Cpk/hWXOLAhSj8P8Q/+3439HEctTZheVBd8q/TtdwXocaZMLi8
+MXURuVTw0GtS9TmdqOFXzloFeaMhJx6TQzZ2aPcxu95b7RjEDtVHus3ed2cSJ2El
+AuRvFT2RCVvTu1mM0Ti7id+d8QBcpbIpPjNjK2Wxir/19gtEawlqlkA=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..480300f29e1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBBjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB5MRwwGgYDVQQD
+ExNzYW50ZXN0aG9zdG5hbWUuY29tMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoT
+B01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZ
+b3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJKOLTNEPv08IVmhfkv6Xq1dT6pki76ggpJ7UpwdUSsTsWDKO2o1c7wnzEjfhYQ+
+CtlEvbYyL3O7f8AaO15WJdi53SMuWS+QfCKs6b0symYbinSXlZGb4oZYFSrodSxH
++G8u+TUxyeaXgTHowMWArmTRi2LgtIwXwwHJawfhFDxji3cSmLAr5YQMAaXUynq3
+g0DEAGMaeOlyn1PkJ2ZfJsX2di+sceKb+KK1xT+2vUSsvnIumBCYqMhU6y3WjBWK
+6WrmOcsldWo4IcgyzwVRlZiuuYoe6ZsxZ4nMyTdYebALPqgkt8QVXqkgcjWK8F18
+nuqWIAn1ISTjj73H4cnzYv0CAwEAAaM8MDowOAYDVR0RBDEwL4INKi5leGFtcGxl
+LmNvbYIJMTI3LjAuMC4xgghtb3JlZnVuIYIJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+BQUAA4IBAQA5M3U4wvQYI3jz/+Eh4POrJAs9eSRGkUhz1lP7D6Fcyp+BbbXB1fa9
+5qpD4bp1ZoDP2R2zca2uwwfd3DTWPbmwFMNqs2D7d0hgX71Vg9DCAwExFjoeRo44
+cCE9kakZtE3kT/tiH6SpYpnBa3dizxTmiY48z212Pw813SSXSPMN1myx5sMJof5I
+whJNQhSQOw6WHw5swZJZT4FkzxjQMrTWdF6r0d5EU9K2WWk5DTwq4QaysplB5l0H
+8qm+fnC6xI+2qgqMO9xqc6qMtHHICXtdUOup6wj/bdeo7bAQdVDyKlFKiYivDXvO
+RJNp2cwsBgxU+qdrtOLp7/j/0R3tUqWb
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAko4tM0Q+/TwhWaF+S/perV1PqmSLvqCCkntSnB1RKxOxYMo7
+ajVzvCfMSN+FhD4K2US9tjIvc7t/wBo7XlYl2LndIy5ZL5B8IqzpvSzKZhuKdJeV
+kZvihlgVKuh1LEf4by75NTHJ5peBMejAxYCuZNGLYuC0jBfDAclrB+EUPGOLdxKY
+sCvlhAwBpdTKereDQMQAYxp46XKfU+QnZl8mxfZ2L6xx4pv4orXFP7a9RKy+ci6Y
+EJioyFTrLdaMFYrpauY5yyV1ajghyDLPBVGVmK65ih7pmzFniczJN1h5sAs+qCS3
+xBVeqSByNYrwXXye6pYgCfUhJOOPvcfhyfNi/QIDAQABAoIBADqGMkClwS2pJHxB
+hEjc+4/pklWt/ywpttq+CpgzEOXN4GiRebaJD+WUUvzK3euYRwbKb6PhWJezyWky
+UID0j/qDBm71JEJdRWUnfdPAnja2Ss0Sd3UFNimF5TYUTC5ZszjbHkOC1WiTGdGP
+a+Oy5nF2SF4883x6RLJi963W0Rjn3jIW9LoLeTgm9bjWXg3iqonCo3AjREdkR/SG
+BZaCvulGEWl/A3a7NmW5EGGNUMvzZOxrqQz4EX+VnYdb7SPrH3pmQJyJpAqUlvD5
+y7pO01fI0wg9kOWiIR0vd3Gbm9NaFmlH9Gr2oyan3CWt1h1gPzkH/V17rZzVYb5L
+RnjLdyECgYEA6X16A5Gpb5rOVR/SK/JZGd+3z52+hRR8je4WhXkZqRZmbn2deKha
+LKZi1eVl11t8zitLg/OSN1uZ/873iESKtp/R6vcGcriUCd87cDh7KTyW/7ZW5jdj
+o6Y3Liai3Xrf6dL+V2xYw964Map9oK9qatYw/L+Ke6b9wbGi+hduf1kCgYEAoK8n
+pzctajS3Ntmk147n4ZVtcv78nWItBNH2B8UaofdkBlSRyUURsEY9nA34zLNWI0f3
+k59+cR13iofkQ0rKqJw1HbTTncrSsFqptyEDt23iWSmmaU3/9Us8lcNGqRm7a35V
+Km0XBFLnE0mGFGFoTpNt8oiR4WGASJPi482xkEUCgYEAwPmQn2SDCheDEr2zAdlR
+pN3O2EwCi5DMBK3TdUsKV0KJNCajwHY72Q1HQItQ6XXWp7sGta7YmOIfXFodIUWs
+85URdMXnUWeWCrayNGSp/gHytrNoDOuYcUfN8VnDX5PPfjyBM5X7ox7vUzUakXSJ
+WnVelXZlKR9yOOTs0xAMpjkCgYAbF61N6mXD5IOHwgajObsrM/CyVP/u4WDJ0UT0
+Zm1pJbc9wgCauQSUfiNhLpHmoc5CQJ4jy96b3+YJ+4OnPPMSntPt4FFV557CkWbQ
+M8bWpLZnZjhixP4FM9xRPA2r8WTCaRifAKnC1t+TRvBOe2YE6aK+I/zEzZW9pwG4
+ezQXKQKBgQAIBSJLa6xWbfbzqyPsvmRNgiEjIamF7wcb1sRjgqWM6sCzYwYv8f5v
+9C4YhNXEn+c5V2KevgYeg6iPSQuzEAfJx64QV7JD8kEBf5GNETnuW45Yg7KwKPD6
+ZCealfpy/o9iiNqbWqDNND91pj2/g5oZnac3misJg5tGCJbJsBFXag==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockkrb5.conf b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockservice.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockuser.keytab b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mongostat.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mongostat.js
new file mode 100644
index 00000000000..91b5c6f36fb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/mongostat.js
@@ -0,0 +1,114 @@
+var exitCodeSuccess = 0;
+var exitCodeErr = 1;
+// Go reserves exit code 2 for its own use.
+var exitCodeBadOptions = 3;
+var exitCodeStopped = 4;
+
+// NOTE: On Windows, stopMongoProgramByPid doesn't terminate a process in a
+// way that lets the process control its own exit code.
+if (_isWindows()) {
+ exitCodeStopped = exitCodeErr;
+}
+
+var rowRegex = /^sh\d+\|\s/;
+// portRegex finds the port on a line which has enough whitespace-delimited
+// values to be considered a stat line and not an error message
+var portRegex = /^sh\d+\|\s+\S+:(\d+)(\s+\S+){16}/;
+
+function statRows() {
+ return rawMongoProgramOutput()
+ .split("\n")
+ .filter(function(r) {
+ return r.match(rowRegex);
+ })
+ .map(function(r) {
+ return r.replace(/^sh\d+\| /, "");
+ });
+}
+
+function statFields(row) {
+ return row.split(/\s/).filter(function(s) {
+ return s !== "";
+ });
+}
+
+function getLatestChunk() {
+ var output = rawMongoProgramOutput();
+ // mongostat outputs a blank line between each set of stats when there are
+ // multiple hosts; we want just one chunk of stat lines
+ var lineChunks = output.split("| \n");
+ if (lineChunks.length === 1) {
+ return lineChunks[0];
+ }
+ return lineChunks[lineChunks.length - 2];
+}
+
+function latestPortCounts() {
+ var portCounts = {};
+ getLatestChunk().split("\n").forEach(function(r) {
+ var matches = r.match(portRegex);
+ if (matches === null) {
+ return;
+ }
+ var port = matches[1];
+ if (!portCounts[port]) {
+ portCounts[port] = 0;
+ }
+ portCounts[port]++;
+ });
+ return portCounts;
+}
+
+function hasPort(port) {
+ port = String(port);
+ return function() {
+ return latestPortCounts()[port] >= 1;
+ };
+}
+
+function lacksPort(port) {
+ port = String(port);
+ return function() {
+ return latestPortCounts()[port] === undefined;
+ };
+}
+
+function hasOnlyPorts(expectedPorts) {
+ expectedPorts = expectedPorts.map(String);
+ return function() {
+ var portCounts = latestPortCounts();
+ for (var port in portCounts) {
+ if (expectedPorts.indexOf(port) === -1) {
+ return false;
+ }
+ }
+ for (var i in expectedPorts) {
+ if (portCounts[expectedPorts[i]] !== 1) {
+ return false;
+ }
+ }
+ return true;
+ };
+}
+
+function statCheck(args, checker) {
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect.apply(null, args);
+ try {
+    assert.soon(checker, "statCheck timed out waiting for expected mongostat output");
+ return true;
+ } catch (e) {
+ return false;
+ } finally {
+ stopMongoProgramByPid(pid);
+ }
+}
+
+function discoverTest(ports, connectHost) {
+ return statCheck(["mongostat",
+ "--host", connectHost,
+ "--noheaders",
+ "--discover"],
+ hasOnlyPorts(ports));
+}
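+
+// Illustrative sketch (not executed here): how the helpers above are
+// typically combined. The ports and host strings are assumptions.
+//
+//   // Expect `mongostat --discover` to eventually report exactly these ports.
+//   assert(discoverTest([27017, 27018], "localhost:27017"), "discover failed");
+//
+//   // Or poll for a single port with a custom argument list.
+//   assert(statCheck(["mongostat", "--port", "27017"], hasPort(27017)));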
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/not_yet_valid.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/not_yet_valid.pem
new file mode 100644
index 00000000000..7c021c0becd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/not_yet_valid.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIBETANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMjAwNzE3MTYwMDAwWhcNMjUwNzE3MTYwMDAwWjBzMRYwFAYDVQQD
+Ew1ub3RfeWV0X3ZhbGlkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdv
+REIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQsw
+CQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2gF+Fo
+CeBKVlPyDAaEA7cjK75CxnzQy+oqw1j/vcfe/CfKL9MvDDXauR/9v1RRlww5zlxQ
+XJJtcMJtxN1EpP21cHrHCpJ/fRsCdMfJdD9MO6gcnclEI0Odwy5YI/57rAgxEuDC
+7z4d+M6z7PLq8DIwvRuhAZVTszeyTsCCkwfTJ/pisD2Ace75pS37t/ttQp+kQ+Vl
+QrfccHYxrScQ9i0JqBfrTULDl6ST76aINOaFKWqrLLkRUvE6pEkL/iP6xXUSKOsm
+uyc0yb0PK5Y/IVdrzwWUkabWEM27RAMH+CAx2iobk6REj0fsGySBzT2CaETZPjck
+vn/LYKqr+CvYjc8CAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcu
+MC4wLjEwDQYJKoZIhvcNAQEFBQADggEBADw37jpmhj/fgCZdF1NrDKLmWxb4hovQ
+Y9PRe6GsBOc1wH8Gbe4UkYAE41WUuT3xW9YpfCHLXxC7da6dhaBISWryX7n72abM
+xbfAghV3je5JAmC0E/OzQz8tTgENxJN/c4oqCQ9nVOOLjwWiim5kF0/NY8HCc/Sg
+OG9IdseRX72CavDaPxcqR9/5KKY/pxARMeyy3/D0FIB1Fwu5h9vjHEi5fGOqcizf
+S1KHfzAmTxVtjw6HWRGKmkPX0W0/lURWVkKRxvC8KkJIeKx3fl9U1PqCw0AVi5d/
+whYn4qHNFFp4OiVzXq3b5YoBy0dlHUePCIPT2GkGlV4NQKosZMJUkKo=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzaAX4WgJ4EpWU/IMBoQDtyMrvkLGfNDL6irDWP+9x978J8ov
+0y8MNdq5H/2/VFGXDDnOXFBckm1wwm3E3USk/bVwescKkn99GwJ0x8l0P0w7qByd
+yUQjQ53DLlgj/nusCDES4MLvPh34zrPs8urwMjC9G6EBlVOzN7JOwIKTB9Mn+mKw
+PYBx7vmlLfu3+21Cn6RD5WVCt9xwdjGtJxD2LQmoF+tNQsOXpJPvpog05oUpaqss
+uRFS8TqkSQv+I/rFdRIo6ya7JzTJvQ8rlj8hV2vPBZSRptYQzbtEAwf4IDHaKhuT
+pESPR+wbJIHNPYJoRNk+NyS+f8tgqqv4K9iNzwIDAQABAoIBAFWTmjyyOuIArhrz
+snOHv7AZUBw32DmcADGtqG1Cyi4DrHe22t6ORwumwsMArP8fkbiB2lNrEovSRkp0
+uqjH5867E1vVuJ2tt1hlVkrLmbi6Nl3JwxU/aVm7r7566kgAGmGyYsPt/PmiKamF
+Ekkq49pPlHSKNol6My0r5UCTVzO6uwW7dAa4GOQRI7bM7PVlxRVVeNzPH3yOsTzk
+smrkRgf8HbjtY7m/EHG281gu14ZQRCqzLshO2BtWbkx9dMXnNU5dRRaZ8Pe8XN0Z
+umsStcX6So6VFAqlwknZTi1/sqyIuQLfE+S9DocVQkvKFUgKpFddK8Nmqc8xPCKt
+UwR9hEECgYEA9kZ5KmUbzxQrF8Kn9G18AbZ/Cf6rE9fhs/J8OGcuuJ9QTjPO7pxV
+T7lGrIOX3dVu3+iHrYXZUZv+UTOePWx+ghqJ8ML7RdVsxAWMqh+1J0eBJKIdc9mt
+0hGkLEyyBbAlfNmvw8JugTUeZH2gA+VK9HoMTAjD+LvH164rrktauKECgYEA1b6z
+lZypAbAqnuCndcetcgatdd/bYNH5WWTgdZHqInt3k94EsUEHFNMQUbO+FNkOJ4qJ
+Jp7xrqkOUX+MPrzV5XYVapamlht9gvUtyxGq7DYndlq4mIsN5kReH++lqONBnWoG
+ZlbxvadkvPo+bK003hsl+E4F8X7xUssGGLvygG8CgYEAm/yLJkUgVgsqOER86R6n
+mtYipQv/A/SK6tU9xOPl/d46mS3LderjRjnN/9rhyAo1zfCUb14GBeDONlSBd9pO
+Ts3MbQiy6sqBt67kJ6UpspVhwPhFu2k25YVy/PQfFec591hSMaXnJEOm2nOPdKg4
+z5y2STqMFfGqZHvXAvCLp8ECgYA8oVGTmNKf9fbBBny5/iAG/jnp+8vg1O7kGqdI
+8lD14wvyV8IA/a8iixRP+Kpsg31uXe+1ktR/dNjo6UNA8JPD+RDuITmzzqx1n1KU
+DbjsNBhRjD5cluUkcjQ43uOg2oXcPxz9nqAH6hm7OUjHzwH2FsFYg9lPvXB6ybg6
+/+Uz5QKBgBxvTtLsZ3Cvvb3qezn4DdpLjlsrT6HWaTGqwEx8NYVBTFX/lT8P04tv
+NqFuQsDJ4gw0AZF7HqF49qdpnHEJ8tdHgBc/xDLFUMuKjON4IZtr0/j407K6V530
+m4q3ziHOu/lORDcZTz/YUjEzT8r7Qiv7QusWncvIWEiLSCC2dvvb
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/parallelTester.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..5886a7dace9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/parallelTester.js
@@ -0,0 +1,268 @@
+/**
+ * The ParallelTester class is used to test more than one test concurrently
+ */
+
+
+if (typeof _threadInject !== "undefined") {
+ // print( "fork() available!" );
+
+ Thread = function() {
+ this.init.apply(this, arguments);
+ };
+ _threadInject(Thread.prototype);
+
+ ScopedThread = function() {
+ this.init.apply(this, arguments);
+ };
+ ScopedThread.prototype = new Thread(function() {});
+ _scopedThreadInject(ScopedThread.prototype);
+
+ fork = function() {
+ var t = new Thread(function() {});
+ Thread.apply(t, arguments);
+ return t;
+ };
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function(me, collectionName, mean, host) {
+ this.mean = mean;
+ if (host === undefined) {
+ host = db.getMongo().host;
+ }
+ this.events = [me, collectionName, host];
+ };
+
+ EventGenerator.prototype._add = function(action) {
+ this.events.push([Random.genExp(this.mean), action]);
+ };
+
+ EventGenerator.prototype.addInsert = function(obj) {
+ this._add("t.insert( " + tojson(obj) + " )");
+ };
+
+ EventGenerator.prototype.addRemove = function(obj) {
+ this._add("t.remove( " + tojson(obj) + " )");
+ };
+
+ EventGenerator.prototype.addUpdate = function(objOld, objNew) {
+ this._add("t.update( " + tojson(objOld) + ", " + tojson(objNew) + " )");
+ };
+
+ EventGenerator.prototype.addCheckCount = function(count, query, shouldPrint, checkQuery) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson(query) + " ) );";
+ if (checkQuery) {
+ action += " assert.eq( " + count + ", t.find( " + tojson(query) + " ).toArray().length );";
+ }
+ if (shouldPrint) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add(action);
+ };
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ };
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray(arguments);
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo(host);
+ var t = m.getDB("test")[collectionName];
+ args.forEach(function(v) {
+ sleep(v[0]);
+ eval(v[1]); // eslint-disable-line no-eval
+ });
+ };
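+
+  // Illustrative sketch (assumes a connected shell, since the constructor
+  // defaults the host from `db`; the names and mean interval are assumptions):
+  //
+  //   var eg = new EventGenerator("worker-0", "coll", 50 /* mean ms */);
+  //   eg.addInsert({_id: 1});
+  //   eg.addCheckCount(1, {_id: 1});
+  //   EventGenerator.dispatch.apply(null, eg.getEvents());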
+
+  // Helper class for running tests in parallel. It assembles a set of tests
+  // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode");
+ this.params = [];
+ };
+
+ ParallelTester.prototype.add = function(fun, args) {
+ args = args || [];
+ args.unshift(fun);
+ this.params.push(args);
+ };
+
+ ParallelTester.prototype.run = function(msg, newScopes) {
+ newScopes = newScopes || false;
+ assert.parallelTests(this.params, msg, newScopes);
+ };
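+
+  // Illustrative sketch: run two functions concurrently and fail with the
+  // given message if either throws (the collection name is an assumption).
+  //
+  //   var pt = new ParallelTester();
+  //   pt.add(function(n) { assert.eq(n, 1); }, [1]);
+  //   pt.add(function() { db.parallel_smoke.insert({a: 1}); });
+  //   pt.run("parallel smoke test failed");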
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function(n) {
+ var params = [];
+ for (var i = 0; i < n; ++i) {
+ params.push([]);
+ }
+
+ var makeKeys = function(a) {
+ var ret = {};
+ a.forEach(function(v) {
+ ret[v] = 1;
+ });
+ return ret;
+ };
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([
+ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js", // log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ]);
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [
+ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys(serialTestsArr);
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[0] = serialTestsArr;
+ var files = listFiles(parallelFilesDir);
+ files = Array.shuffle(files);
+
+ i = 0;
+ files.forEach(function(x) {
+ if ((/[/\\]_/.test(x.name)) ||
+ (!/\.js$/.test(x.name)) ||
+ (x.name.match(parallelFilesDir + "/(.*\\.js)")[1] in skipTests) ||
+ (x.name in serialTests)) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[i % n].push(x.name);
+ ++i;
+ });
+
+ // randomize ordering of the serialTests
+ params[0] = Array.shuffle(params[0]);
+
+    // prefix each list with its index, which fileTester uses as a suite id
+    params.forEach(function(param, j) {
+      param.unshift(j);
+    });
+
+ return params;
+ };
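+
+  // Illustrative sketch: split the core suite across 4 threads and run it.
+  //
+  //   var lists = ParallelTester.createJstestsLists(4);
+  //   var pt = new ParallelTester();
+  //   lists.forEach(function(list) {
+  //     pt.add(ParallelTester.fileTester, list);
+  //   });
+  //   pt.run("one or more parallel test files failed");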
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray(arguments);
+ var suite = args.shift();
+ args.forEach(function(x) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc(function() {
+ load(x);
+ }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms");
+ });
+ };
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function(params, msg, newScopes) {
+ newScopes = newScopes || false;
+ var wrapper = function(fun, argv) {
+ // TODO: this doesn't need to use eval
+ eval( // eslint-disable-line no-eval
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson(argv) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ };
+ var runners = [];
+ params.forEach(function(param) {
+ var test = param.shift();
+ var t;
+ if (newScopes) {
+ t = new ScopedThread(wrapper(test, param));
+ } else {
+ t = new Thread(wrapper(test, param));
+ }
+ runners.push(t);
+ });
+
+ runners.forEach(function(x) {
+ x.start();
+ });
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach(function(x) {
+ if (!x.returnData()) {
+ ++nFailed;
+ }
+ });
+ assert.eq(0, nFailed, msg);
+ };
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/password_protected.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..25e47bc2402
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBCTANBgkqhkiG9w0BAQUFADB4MRswGQYDVQQDExJwYXNz
+d29yZF9wcm90ZWN0ZWQxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29E
+QjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJ
+BgNVBAYTAlVTMB4XDTE0MDcxNzE2MDAwMFoXDTIwMDcxNzE2MDAwMFoweDEbMBkG
+A1UEAxMScGFzc3dvcmRfcHJvdGVjdGVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNV
+BAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5l
+dyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALT4r3Hcou2auIOHeihBSjk4bKQTVqI6r/stnkul359SRfKuzVA9gMQaRRDi
+MJoxczHJzS2FX+wElzBt2EUhfu3qpUJ4gJw7H4WjLx+mNnj/+6b4HUO4eRzH5hTE
+A+qgDH40qYjFDEjiARvybWo3IlDLeI/uFwlyUj5PZBUBc1LBBzNtCBfJ2MmHLhIx
+jzTFhkJZll673LL6BPHtJclXCazqKUZDLqObW4Ei6X4hdBOdC8v8Q6GMgC4BxLe0
+wsOpKYYeM3il4BtfiqDQB5ZPG0lgo1Y7OOyFHFXBA7oNkK8lykhdyH4iLt5L9mWo
+VKyZ79VqSODFuCqWo8n8kUTgA/0CAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAntxk8a0HcuPG8Fdjckp6WL+HKQQnUKdvSk06rPF0SHpN
+Ma4eZcaumROdtAYnPKvtbpq3DRCQlj59dlWPksEcYiXqf56TgcyAz5K5g5z9RbFi
+ArvAXJNRcDz1080NWGBUTPYyiKWR3PhtlYhJZ4r7fQIWLv4mifXHViw2roXXhsAY
+ubk9HOtrqE7x6NJXgR24aybxqI6TfAKfM+LJNtMwMFrPC+GHnhqMOs/jHJS38NIB
+TrKA63TdpYUroVu23/tGLQaJz352qgF4Di91RkUfnI528goj57pX78H8KRsSNVvs
+KHVNrxtZIez+pxxjBPnyfCH81swkiAPG9fdX+Hcu5A==
+-----END CERTIFICATE-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6y3b7IxcANECAggA
+MB0GCWCGSAFlAwQBAgQQrTslOKC0GZZwq48v7niXYQSCBNBNKsTN7fyw60/EEDH0
+JUgxL83Wfb7pNP97/lV5qiclY1mwcKz44kXQaesFTzhiwzAMOpbI/ijEtsNU25wV
+wtTgjAC3Em/+5/ygrmAu7hgacIRssspovmsgw029E9iOkyBd1VIrDVMi7HLHf0iU
+2Zq18QF20az2pXNMDipmVJkpc9NvjSdqka5+375pJuisspEWCDBd11K10jzCWqB5
+q3Rm1IIeq+mql6KT1rJcUoeE0facDc9GDYBiF/MfIKQ3FrZy/psqheCfL1UDUMyc
+mnm9GJO5+bCuHkg8ni0Zo5XXsf2VEFt0yt6lSucoOP43flucQaHnFKcn+5DHjDXv
+S6Eb5wEG9qWtzwWy/9DfRbkj6FxUgT3SFgizo/uLmdqFCJCnYkHUD1OuYCDmoIXP
+VTinwgK4lO/vrGfoPQrgJmdlnwHRWYjlB8edMCbmItaj2Esh3FBS12y976+UT0Sk
+8n5HsZAEYScDyNArVhrLUZRgF+r+bgZ28TDFO0MISPCAbZjhvq6lygS3dEmdTUW3
+cFDe1deNknWxZcv4UpJW4Nq6ckxwXBfTB1VFzjp7/vXrK/Sd9t8zi6vKTO8OTqc4
+KrlLXBgz0ouP/cxhYDykUrKXE2Eb0TjeAN1txZWo3fIFzXUvDZCphQEZNUqsFUxH
+86V2lwqVzKrFq6UpTgKrfTw/2ePQn9dQgd7iFWDTWjRkbzA5aAgTSVP8xQRoIOeQ
+epXtP9202kEz3h28SZYK7QBOTTX9xNmV/dzDTsi9nXZ6KtsP/aGFE5hh95jvESx/
+wlOBAPW4HR33rSYalvQPE7RjjLZHOKuYIllUBGlTOfgdA+WUXR3KxiLNPdslPBPV
++O6aDyerhWoQwE7TFwhP/FpxL/46hOu4iq4fgqfjddBTq8z5jG3c3zzogDjoDzBF
+LEQDcbenUCGbEQ7zxXsXtr3QinJ+aAejDO38hp1h9ROb5LF53/9H2j/16nby/jPX
+7kp2weRSKGJ0B6AVuS9pTsQz4+E3icsIgBWSU6qtcUz2GO2QxnFuvT9LEVnyMNN2
+IKMIEKi2FsUMddHGXLULTANlzUMocdHrd5j81eqcFPhMOFOiHpgwiwxqZyBYOLRl
+Fe7x5dLVWoLgjJagZj8uYnJbExDsfFLjEx8p4Z+rejJIC5CqZLbz9sDgCtIL+92k
++x4mlT1Rfmz9pU+RQqik83nFFRBGWxeW9iWWEgocWtmezvnK6E241v78zkqxNkvF
+JJo7BsBw7DiEHEfLhBZYuqV2q6+kwqgYrzyGIwAJkBGrkYfalVzgR+3/uN04h005
+M3jQRpSkDVGYr3JKEAlh3Sc+JD9VPbu6/RXNwy5mY67UCgWGaFwRqJE3DC9aKfNC
+OET8m8+8oQgFzhw3pNpENsgwR+Sx3K4q0GI3YwxT02pieBFNQaw53O3B3TtoCjkk
+UsuyIWqcLonwo4I3z0kjU3gEFN+0m4E4/A1DNt0J3rsKN+toCk1FqbxQg9xTZzXu
+hYmA3HMMwugzXmCanqBhmMsniPg+dRxCIfiHZhLuEpjKxZWcMWcW4M6l/wbM+LbE
+oDcTuI9ezfPTZ3xA8hNIHBT3MhuI7EJQnvKKvJDJeyX5sAtmSsSFqhEr8QZD8RgV
+5H9eOyUdfcWxLlstcq982V0oGg==
+-----END ENCRYPTED PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/server.pem
new file mode 100644
index 00000000000..d01b336fb86
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/server.pem
@@ -0,0 +1,58 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml server.pem
+#
+# General purpose server certificate file.
+-----BEGIN CERTIFICATE-----
+MIIEZDCCA0wCBAyW2DwwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
+DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
+IENBMB4XDTE5MDkyNTIzMjc0MVoXDTM5MDkyNzIzMjc0MVowbDELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAw
+DgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxDzANBgNVBAMMBnNlcnZl
+cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAME6pVjavnJIORzETE+Y
+UKRONMo0dRF1Jmib/zXylTssSJDGwrxL10vfCAyXSxJqQRpJraAbdrV01e0jdal3
+Y6jkXLqxv8rWuMDaFJUbOFPjz8Ad4JsvxSgJVSBeKRw4YcGC5U9B6lkuF8oZPq65
+nhLeHZliDL2LZCep8+8YCY3zPhpQ82huf4DkOMsbPxe0/Mo5r3Z3+BIMsGrKeVlY
+TUBReMLUPAGcuAkyxN+WV6wRjlXxUOzk0txmWTzzt2dx2XLGR/Ob8fLRSm4U471P
+7lRg8gaKzVXUcFQOA6KiM+aeIcDYlL/Z+5yyO/aGY1Xxt43MkrsAq2Afx3gKQYe9
+yukCAwEAAaOCAQkwggEFMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQW
+MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUPyEOAwSRUZaUE3lS4Nsq
+2KO0mMEwgYsGA1UdIwSBgzCBgKF4pHYwdDELMAkGA1UEBhMCVVMxETAPBgNVBAgM
+CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdNb25n
+b0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggQZ
+kSMSMB8GA1UdEQQYMBaCCWxvY2FsaG9zdIIJMTI3LjAuMC4xMA0GCSqGSIb3DQEB
+CwUAA4IBAQCTAoHLEMxJF7OFKTpuAub1y9ooiryn57agjiSsV8J5EiNahbwyr/cv
+HBD+TTvOfNV+RudScxKPl+opz3qAX8dDDbGAgUdvGeDI3UTiPOjD2okVrH8hKlA1
+/cTkBZNc6b3QZ4URoG3DkGDgM2P9/fNBp0G0QiYns2hMWy8mxxTuC3zGCB6q5A48
+lGxz0eNOHvKDwhAJBOeSVKc1wtmazviU6d5TH+WjWKO5ulbMiQq8R7U1gPcIFhk+
+937EfCiHE8VQOdWJ2XHeY7XitT57ukvi9PZYDYd2Jc0at2l69MYoFetV2jM+l60R
+KIsXzgkv8Hg+OYXl/du6oWJsGujJnryk
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDBOqVY2r5ySDkc
+xExPmFCkTjTKNHURdSZom/818pU7LEiQxsK8S9dL3wgMl0sSakEaSa2gG3a1dNXt
+I3Wpd2Oo5Fy6sb/K1rjA2hSVGzhT48/AHeCbL8UoCVUgXikcOGHBguVPQepZLhfK
+GT6uuZ4S3h2ZYgy9i2QnqfPvGAmN8z4aUPNobn+A5DjLGz8XtPzKOa92d/gSDLBq
+ynlZWE1AUXjC1DwBnLgJMsTfllesEY5V8VDs5NLcZlk887dncdlyxkfzm/Hy0Upu
+FOO9T+5UYPIGis1V1HBUDgOiojPmniHA2JS/2fucsjv2hmNV8beNzJK7AKtgH8d4
+CkGHvcrpAgMBAAECggEAKRC1ozSZ1N/Ug3CUZzYPrVuRjIepeZRUHVdJ3cU4QeMf
+aOVU7X+QueckZsigg8FhJJ0T8Trk95tk+4jVsLZWz8X4jxTHlewcR6ART78BMZLw
+y/uNiG2WkPOqy743LnAtFlsjMLzpeRp6o9DJqYh5N7lMwRQ9cOr21hcgaGfLYAc7
+Bbl/565RSeuw2fmSGkUMOAnPBoJvRfcsaZnEbciLY5ollLPmV00F1elEGGUyynlE
+3WiMuh2dtT2em7pdJJCcTxeKrF6Kdl0R1CYx8Ay/N7YMg3LgBY+SmR559qEy1IA5
+sMjjd79Jg+GNxj+/8V1zHePaNy1CeQLWoVb7OLZsAQKBgQDx0SpcWU/Z2P87CCqW
+Ym8jE3FFHHrz33UWmqvJ9dmVQSHZn3UaUw9WF9L8mS7t3fNroQ1xVOoB+GkKuWC5
+BnWqyOKNnIK8jNp62M26S6nyAhEwAgLK68YHrKZde9FZYE42SNlo/OQBULfJ47KF
+S0Pldz2dkQhzLPooeedonzu1mQKBgQDMj/EcQeHBAFJ8jfXgqRWAXbYzDtK1hdiy
+WtBZFVMGlomW7Mq11148GWLVVMpQX3eyPmv+t7KfI02q+oNkJsw7BUdjzsSuUtt1
+IULuznS9c+MpzUE37fXZkcUcbTDYXH8Bp0v+5u98E5rWgb4jbM3LHaKoyyMgHeAT
+qs6nzUZx0QKBgEy21aB8ePecfsQEgDY7PhS7m0iYk20ArA2ivfMROpi+E/YNP0I9
+rXTblASsuBWgPxQbAL++d4pueyx6yPOJ2BR5LF5Ok68rZDhqxUFftadf/oHjcW1c
+xPM0U4b+u9iF0+omzEohSFgeel/jC4R77ytB29s74plfYHEs0zv+oBupAoGAb4DK
+kXeL4dMWI1SSQOqJkNYD+aLL0ixqKRX9WYGZJw8pYn4iJKqHcET6LYSxXswbB6qk
+3UwubjYlINy6f/3x9v3yowHTDZ6WhyshO+CHm7kuituaAC2ShAzEH7GZHWaVyEXz
+07b3tW9OSDyictbc+ilmP5GyzM/cE3xYdvkau/ECgYEA8P4AV6KSjLswaMTYSc+X
+/8KdBhe6r6Eqs4FdACHeRixvgiafC7PFnnxtpSLAFDWoHI2EZEkTJ564TpAksXA3
+Z5zletQt0cPe3CQVvowrYgNUtjaUNG67BzOoUa4fln+jTg1LnAT3wIQZyef/1rKU
+yHx62fbnF1NBTj3vih8aIOk=
+-----END PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers.js
new file mode 100644
index 00000000000..6b6daa4d71e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers.js
@@ -0,0 +1,1096 @@
+// Wrap whole file in a function to avoid polluting the global namespace
+(function() {
+ jsTestOptions = function () {
+ if (TestData) {
+ return Object.merge(_jsTestOptions, {
+ setParameters: TestData.setParameters,
+ setParametersMongos: TestData.setParametersMongos,
+ storageEngine: TestData.storageEngine,
+ wiredTigerEngineConfigString: TestData.wiredTigerEngineConfigString,
+ wiredTigerCollectionConfigString: TestData.wiredTigerCollectionConfigString,
+ wiredTigerIndexConfigString: TestData.wiredTigerIndexConfigString,
+ noJournal: TestData.noJournal,
+ noJournalPrealloc: TestData.noJournalPrealloc,
+ auth: TestData.auth,
+ keyFile: TestData.keyFile,
+ authUser: "__system",
+ authPassword: TestData.keyFileData,
+ authMechanism: TestData.authMechanism,
+ adminUser: TestData.adminUser || "admin",
+ adminPassword: TestData.adminPassword || "password",
+ useLegacyConfigServers: TestData.useLegacyConfigServers || false,
+ useLegacyReplicationProtocol: TestData.useLegacyReplicationProtocol || false,
+ enableEncryption: TestData.enableEncryption,
+ encryptionKeyFile: TestData.encryptionKeyFile,
+ auditDestination: TestData.auditDestination,
+ useSSL: TestData.useSSL,
+ minPort: TestData.minPort,
+ maxPort: TestData.maxPort,
+ });
+ }
+ return _jsTestOptions;
+ };
+
+ // Shim to allow compatibility with newer shells.
+ if (typeof stopMongod === 'undefined') {
+ stopMongod = _stopMongoProgram;
+ }
+ if (typeof startMongod === 'undefined') {
+ startMongod = function() {
+      // `arguments` is not a real array; copy it so indexOf/push below work
+      var argArray = Array.from(arguments);
+ if (jsTestOptions().useSSL) {
+ if (argArray.indexOf('--sslMode') < 0) {
+ argArray.push(
+ '--sslMode', 'requireSSL',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation');
+ }
+ }
+ _startMongod.apply(null, argArray);
+ };
+ }
+
+ _parsePath = function() {
+ var dbpath = "";
+ for (var i = 0; i < arguments.length; ++i) {
+ if (arguments[i] === "--dbpath") {
+ dbpath = arguments[i + 1];
+ }
+ }
+
+ if (dbpath === "") {
+ throw Error("No dbpath specified");
+ }
+
+ return dbpath;
+ };
+
+ _parsePort = function() {
+ var port = "";
+ for (var i = 0; i < arguments.length; ++i) {
+ if (arguments[i] === "--port") {
+ port = arguments[i + 1];
+ }
+ }
+
+ if (port === "") {
+ throw Error("No port specified");
+ }
+ return port;
+ };
+
+ connectionURLTheSame = function(a, b) {
+
+ if (a === b) {
+ return true;
+ }
+
+ if (!a || !b) {
+ return false;
+ }
+
+ if (a.host) {
+ return connectionURLTheSame(a.host, b);
+ }
+ if (b.host) {
+ return connectionURLTheSame(a, b.host);
+ }
+
+ if (a.name) {
+ return connectionURLTheSame(a.name, b);
+ }
+ if (b.name) {
+ return connectionURLTheSame(a, b.name);
+ }
+
+ if (a.indexOf("/") < 0 && b.indexOf("/") < 0) {
+ a = a.split(":");
+ b = b.split(":");
+
+ if (a.length !== b.length) {
+ return false;
+ }
+
+ if (a.length === 2 && a[1] !== b[1]) {
+ return false;
+ }
+
+ if (a[0] === "localhost" || a[0] === "127.0.0.1") {
+ a[0] = getHostName();
+ }
+ if (b[0] === "localhost" || b[0] === "127.0.0.1") {
+ b[0] = getHostName();
+ }
+
+ return a[0] === b[0];
+ }
+ var a0 = a.split("/")[0];
+ var b0 = b.split("/")[0];
+ return a0 === b0;
+ };
+
+ assert(connectionURLTheSame("foo", "foo"));
+ assert(!connectionURLTheSame("foo", "bar"));
+
+ assert(connectionURLTheSame("foo/a,b", "foo/b,a"));
+ assert(!connectionURLTheSame("foo/a,b", "bar/a,b"));
+
+ createMongoArgs = function(binaryName, args) {
+ var fullArgs = [binaryName];
+
+ if (args.length === 1 && isObject(args[0])) {
+ var o = args[0];
+ for (var k in o) {
+ if (o.hasOwnProperty(k)) {
+ if (k === "v" && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10) {
+ n = 10;
+ }
+ var temp = "-";
+ while (n-- > 0) {
+ temp += "v";
+ }
+ fullArgs.push(temp);
+ }
+ } else {
+ fullArgs.push("--" + k);
+ if (o[k] !== "") {
+ fullArgs.push(String(o[k]));
+ }
+ }
+ }
+ }
+ } else {
+ for (var i=0; i<args.length; i++) {
+ fullArgs.push(args[i]);
+ }
+ }
+
+ return fullArgs;
+ };
+
+
+ MongoRunner = function() {};
+
+ MongoRunner.dataDir = "/data/db";
+ MongoRunner.dataPath = "/data/db/";
+ MongoRunner.usedPortMap = {};
+
+ MongoRunner.VersionSub = function(regex, version) {
+ this.regex = regex;
+ this.version = version;
+ };
+
+ // These patterns allow substituting the binary versions used for each
+ // version string to support the dev/stable MongoDB release cycle.
+ MongoRunner.binVersionSubs = [
+ new MongoRunner.VersionSub(/^latest$/, ""),
+ new MongoRunner.VersionSub(/^oldest-supported$/, "1.8"),
+ // To-be-updated when 3.0 becomes available
+ new MongoRunner.VersionSub(/^last-stable$/, "2.6"),
+ // Latest unstable and next stable are effectively the same release
+ new MongoRunner.VersionSub(/^2\.7(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^2\.8(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^3\.0(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^3\.1(\..*){0,1}/, ""),
+ ];
+
+ MongoRunner.getBinVersionFor = function(version) {
+
+ // If this is a version iterator, iterate the version via toString()
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.toString();
+ }
+
+    // No version set means we use no suffix; this is *different* from "latest",
+    // since "latest" may be mapped to a different version.
+ version = version || "";
+ version = version.trim();
+ if (version === "") {
+ return "";
+ }
+
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.regex.test(version)) {
+ version = sub.version;
+ }
+ }
+
+ return version;
+ };
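+
+  // For example, with the substitutions above (illustrative):
+  //   MongoRunner.getBinVersionFor("last-stable")  // => "2.6"
+  //   MongoRunner.getBinVersionFor("3.0.6")        // => "" (current binaries)
+  //   MongoRunner.getBinVersionFor(undefined)      // => "" (no suffix)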
+
+ MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ versionA = MongoRunner.getBinVersionFor(versionA);
+ versionB = MongoRunner.getBinVersionFor(versionB);
+
+ if (versionA === "" || versionB === "") {
+ return versionA === versionB;
+ }
+
+ return versionA.startsWith(versionB) ||
+ versionB.startsWith(versionA);
+ };
+
+ MongoRunner.logicalOptions = {
+ runId: true,
+ pathOpts: true,
+ remember: true,
+ noRemember: true,
+ appendOptions: true,
+ restart: true,
+ noCleanData: true,
+ cleanData: true,
+ startClean: true,
+ forceLock: true,
+ useLogFiles: true,
+ logFile: true,
+ useHostName: true,
+ useHostname: true,
+ noReplSet: true,
+ forgetPort: true,
+ arbiter: true,
+ noJournalPrealloc: true,
+ noJournal: true,
+ binVersion: true,
+ waitForConnect: true,
+ };
+
+ MongoRunner.toRealPath = function(path, pathOpts) {
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {};
+ path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
+ path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
+    for (var key in pathOpts) { // eslint-disable-line guard-for-in
+ path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ }
+
+ // Relative path
+ // Detect Unix and Windows absolute paths
+ // as well as Windows drive letters
+ // Also captures Windows UNC paths
+ if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
+ if (path !== "" && !path.endsWith("/")) {
+ path += "/";
+ }
+
+ path = MongoRunner.dataPath + path;
+ }
+
+ return path;
+ };
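+
+  // For example (illustrative):
+  //   MongoRunner.toRealPath("$dataDir/mongod-$port", {port: "27017"})
+  //     // => "/data/db/mongod-27017"
+  //   MongoRunner.toRealPath("dump", {})
+  //     // => "/data/db/dump/" (relative paths land under MongoRunner.dataPath)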
+
+ MongoRunner.toRealDir = function(path, pathOpts) {
+ path = MongoRunner.toRealPath(path, pathOpts);
+ if (path.endsWith("/")) {
+ path = path.substring(0, path.length - 1);
+ }
+ return path;
+ };
+
+ MongoRunner.toRealFile = MongoRunner.toRealDir;
+
+ MongoRunner.nextOpenPort = function() {
+ if (typeof allocatePort === "function") {
+ return allocatePort();
+ }
+
+ var i = 0;
+ while (MongoRunner.usedPortMap[String(27000 + i)]) {
+ i++;
+ }
+ MongoRunner.usedPortMap[String(27000 + i)] = true;
+
+ return 27000 + i;
+ };
+
+ /**
+ * Returns an iterator object which yields successive versions on toString(), starting from a
+ * random initial position, from an array of versions.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on toString()
+ *
+   * @param {Array.<String>|String|versionIterator} arr
+ */
+ MongoRunner.versionIterator = function(arr, isRandom) {
+
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if (typeof arr === "string") {
+ return arr;
+ }
+ if (arr.isVersionIterator) {
+ return arr;
+ }
+
+ isRandom = isRandom || false;
+
+ // Starting pos
+ var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
+
+ return new MongoRunner.versionIterator.iterator(i, arr);
+ };
+
+ MongoRunner.versionIterator.iterator = function(i, arr) {
+
+ this.toString = function() {
+ i = (i + 1) % arr.length;
+ print("Returning next version : " + i +
+ " (" + arr[i] + ") from " + tojson(arr) + "...");
+ return arr[i];
+ };
+
+ this.isVersionIterator = true;
+
+ };
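+
+  // Illustrative sketch: rotate binary versions between restarts.
+  //
+  //   var vi = MongoRunner.versionIterator(["2.6", "latest"]);
+  //   // Each time vi is coerced to a string (e.g. when used as a binVersion
+  //   // option), it yields the next entry in the array, wrapping around.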
+
+ /**
+ * Converts the args object by pairing all keys with their value and appending
+   * dash-dash (--) to the keys. The only exceptions to this rule are keys
+   * defined in MongoRunner.logicalOptions, which will be ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+ MongoRunner.arrOptions = function(binaryName, args) {
+ var fullArgs = [""];
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if (isObject(args) || (args.length === 1 && isObject(args[0]))) {
+ var o = isObject(args) ? args : args[0];
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion !== "") {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function(option, value) {
+ if (!o.binVersion) {
+ return true;
+ }
+
+ // Version 1.x options
+ if (o.binVersion.startsWith("1.")) {
+ return ["nopreallocj"].indexOf(option) < 0;
+ }
+
+ return true;
+ };
+
+ var addOptionsToFullArgs = function(k, v) {
+ if (v === undefined || v === null) {
+ return;
+ }
+
+ fullArgs.push("--" + k);
+
+ if (v !== "") {
+ fullArgs.push("" + v);
+ }
+ };
+
+ for (var k in o) {
+ // Make sure our logical option should be added to the array of options
+ if (!o.hasOwnProperty(k) ||
+ k in MongoRunner.logicalOptions ||
+ !isValidOptionForBinary(k, o[k])) {
+ continue;
+ }
+
+ if ((k === "v" || k === "verbose") && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10) {
+ n = 10;
+ }
+ var temp = "-";
+ while (n-- > 0) {
+ temp += "v";
+ }
+ fullArgs.push(temp);
+ }
+ } else if (k === "setParameter" && isObject(o[k])) {
+ // If the value associated with the setParameter option is an object, we want
+ // to add all key-value pairs in that object as separate --setParameters.
+ for (var l = 0; l < Object.keys(o[k]).length; l++) {
+ var paramKey = Object.keys(o[k])[l];
+ addOptionsToFullArgs(k, "" + paramKey + "=" + o[k][paramKey]);
+ }
+ } else {
+ addOptionsToFullArgs(k, o[k]);
+ }
+ }
+ } else {
+ for (var i=0; i<args.length; i++) {
+ fullArgs.push(args[i]);
+ }
+ }
+
+ fullArgs[0] = binaryName;
+ return fullArgs;
+ };
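+
+  // For example (illustrative; argument order follows the object's key order):
+  //   MongoRunner.arrOptions("mongod", {port: 27017, v: 2, nojournal: ""})
+  //   // => ["mongod", "--port", "27017", "-vv", "--nojournal"]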
+
+ MongoRunner.arrToOpts = function(arr) {
+ var opts = {};
+ for (var i = 1; i < arr.length; i++) {
+ if (arr[i].startsWith("-")) {
+ var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
+ if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
+ opts[opt] = arr[i + 1];
+ i++;
+ } else {
+ opts[opt] = "";
+ }
+
+ if (opt.replace(/v/g, "") === "") {
+ opts["verbose"] = opt.length;
+ }
+ }
+ }
+
+ return opts;
+ };
+
+ MongoRunner.savedOptions = {};
+
+ MongoRunner.mongoOptions = function(opts) {
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
+
+ // If we're a mongo object
+ if (opts.getDB) {
+ opts = {restart: opts.runId};
+ }
+
+ // Initialize and create a copy of the opts
+ opts = Object.merge(opts || {}, {});
+
+ opts.restart = opts.restart || false;
+
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if (opts.restart && opts.restart.getDB) {
+ opts.runId = opts.restart.runId;
+ opts.restart = true;
+ } else if (isObject(opts.restart)) {
+ // If it's the runId itself
+ opts.runId = opts.restart;
+ opts.restart = true;
+ }
+
+ if (isObject(opts.remember)) {
+ opts.runId = opts.remember;
+ opts.remember = true;
+ } else if (opts.remember === undefined) {
+ // Remember by default if we're restarting
+ opts.remember = opts.restart;
+ }
+
+ // If we passed in restart : <conn> or runId : <conn>
+ if (isObject(opts.runId) && opts.runId.runId) {
+ opts.runId = opts.runId.runId;
+ }
+
+ if (opts.restart && opts.remember) {
+ opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
+ }
+
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId();
+
+ // Save the port if required
+ if (!opts.forgetPort) {
+ opts.port = opts.port || MongoRunner.nextOpenPort();
+ }
+
+ var shouldRemember = (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
+
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ if (shouldRemember) {
+ MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
+ }
+
+ // Default for waitForConnect is true
+ opts.waitForConnect = (waitForConnect === undefined || waitForConnect === null) ?
+ true : waitForConnect;
+
+ if (jsTestOptions().useSSL) {
+ opts.sslMode = opts.sslMode || "requireSSL";
+ opts.sslPEMKeyFile = opts.sslPEMKeyFile || "jstests/libs/server.pem";
+ opts.sslCAFile = opts.sslCAFile || "jstests/libs/ca.pem";
+
+      // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+      // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if (jsTestOptions().useX509 && !opts.clusterAuthMode) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ opts.port = opts.port || MongoRunner.nextOpenPort();
+ MongoRunner.usedPortMap[String(parseInt(opts.port))] = true;
+
+ opts.pathOpts = Object.merge(opts.pathOpts || {}, {
+ port: String(opts.port),
+ runId: String(opts.runId),
+ });
+
+ if (!opts.bind_ip) {
+ opts.bind_ip = "0.0.0.0";
+ }
+
+ return opts;
+ };
+
+ /**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournalPrealloc {boolean}
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+ MongoRunner.mongodOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+ opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port",
+ opts.pathOpts);
+ opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = opts.dbpath + "/mongod.log";
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
+
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
+
+ if (jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc) {
+ opts.nopreallocj = "";
+ }
+
+ if (jsTestOptions().noJournal || opts.noJournal) {
+ opts.nojournal = "";
+ }
+
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
+
+ if (jsTestOptions().useSSL) {
+ opts.sslMode = opts.sslMode || "requireSSL";
+ opts.sslPEMKeyFile = opts.sslPEMKeyFile || "jstests/libs/server.pem";
+ opts.sslCAFile = opts.sslCAFile || "jstests/libs/ca.pem";
+
+      // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+      // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if (jsTestOptions().useX509 && !opts.clusterAuthMode) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ if (opts.noReplSet) {
+ opts.replSet = null;
+ }
+ if (opts.arbiter) {
+ opts.oplogSize = 1;
+ }
+
+ return opts;
+ };
+
+ MongoRunner.mongosOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+
+    // Normalize the configdb option to a host string if it is currently a connection
+ if (opts.configdb && opts.configdb.getDB) {
+ opts.configdb = opts.configdb.host;
+ }
+
+ opts.pathOpts = Object.merge(opts.pathOpts, {
+ configdb: opts.configdb.replace(/:|,/g, "-")
+ });
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log",
+ opts.pathOpts);
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
+
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
+
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
+
+ return opts;
+ };
+
+ /**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true
+ * forceLock {boolean}: Deletes the lock file if set to true
+ * dbpath {string}: location of db files
+ * cleanData {boolean}: Removes all files in dbpath if true
+ * startClean {boolean}: same as cleanData
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority)
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+ MongoRunner.runMongod = function(opts) {
+ opts = opts || {};
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongodOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ if (opts.forceLock) {
+ removeFile(opts.dbpath + "/mongod.lock");
+ }
+ if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
+ print("Resetting db path '" + opts.dbpath + "'");
+ resetDbpath(opts.dbpath);
+ }
+
+ opts = MongoRunner.arrOptions("mongod", opts);
+ }
+
+ var mongod = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) {
+      mongod = {};
+ }
+ if (!mongod) {
+ return null;
+ }
+
+ mongod.commandLine = MongoRunner.arrToOpts(opts);
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
+ mongod.host = mongod.name;
+ mongod.port = parseInt(mongod.commandLine.port);
+ mongod.runId = runId || ObjectId();
+ mongod.dbpath = fullOptions.dbpath;
+ mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
+ mongod.fullOptions = fullOptions;
+
+ return mongod;
+ };
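+
+  // Illustrative sketch: start a throwaway mongod on a free port, then stop it.
+  //
+  //   var conn = MongoRunner.runMongod({});  // cleans a fresh dbpath by default
+  //   assert.commandWorked(conn.getDB("admin").runCommand({ping: 1}));
+  //   MongoRunner.stopMongod(conn.port);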
+
+ MongoRunner.runMongos = function(opts) {
+ opts = opts || {};
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongosOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ opts = MongoRunner.arrOptions("mongos", opts);
+ }
+
+ var mongos = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) {
+ mongos = {};
+ }
+ if (!mongos) {
+ return null;
+ }
+
+ mongos.commandLine = MongoRunner.arrToOpts(opts);
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
+ mongos.host = mongos.name;
+ mongos.port = parseInt(mongos.commandLine.port);
+ mongos.runId = runId || ObjectId();
+ mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
+ mongos.fullOptions = fullOptions;
+
+ return mongos;
+ };
+
+ /**
+ * Kills a mongod process.
+ *
+ * @param {number} port the port of the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * }
+ * }
+ *
+   * Note: The auth option is required for an authenticated mongod running on
+   * Windows, since stopping it uses the shutdown command, which requires
+   * admin credentials.
+ */
+ MongoRunner.stopMongod = function(port, signal, opts) {
+ if (!port) {
+ print("Cannot stop mongo process " + port);
+ return;
+ }
+
+ signal = signal || 15;
+
+ if (port.port) {
+ port = parseInt(port.port);
+ }
+
+ if (port instanceof ObjectId) {
+      opts = MongoRunner.savedOptions[port];
+ if (opts) {
+ port = parseInt(opts.port);
+ }
+ }
+
+ var exitCode = stopMongod(parseInt(port), parseInt(signal), opts);
+
+ delete MongoRunner.usedPortMap[String(parseInt(port))];
+
+ return exitCode;
+ };
+
+ MongoRunner.stopMongos = MongoRunner.stopMongod;
+
+ MongoRunner.isStopped = function(port) {
+ if (!port) {
+ print("Cannot detect if process " + port + " is stopped.");
+ return;
+ }
+
+ if (port.port) {
+ port = parseInt(port.port);
+ }
+
+ if (port instanceof ObjectId) {
+      opts = MongoRunner.savedOptions[port];
+ if (opts) {
+ port = parseInt(opts.port);
+ }
+ }
+
+ return !MongoRunner.usedPortMap[String(parseInt(port))];
+ };
+
+ /**
+ * Starts an instance of the specified mongo tool
+ *
+ * @param {String} binaryName The name of the tool to run
+ * @param {Object} opts options to pass to the tool
+ * {
+ * binVersion {string}: version of tool to run
+ * }
+ *
+ * @see MongoRunner.arrOptions
+ */
+ MongoRunner.runMongoTool = function(binaryName, opts) {
+ opts = opts || {};
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ var argsArray = MongoRunner.arrOptions(binaryName, opts);
+
+ return runMongoProgram.apply(null, argsArray);
+ };
+
+ // Given a test name figures out a directory for that test to use for dump files and makes sure
+ // that directory exists and is empty.
+ MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+ };
+
+ // Start a mongod instance and return a 'Mongo' object connected to it.
+ // This function's arguments are passed as command line arguments to mongod.
+ // The specified 'dbpath' is cleared if it exists, created if not.
+ // var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+ startMongodEmpty = function () {
+ var args = createMongoArgs("mongod", arguments);
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+ };
+ startMongod = function () {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return startMongodEmpty.apply(null, arguments);
+ };
+ startMongodNoReset = function() {
+ var args = createMongoArgs("mongod", arguments);
+ return startMongoProgram.apply(null, args);
+ };
+
+ startMongos = function(args) {
+ return MongoRunner.runMongos(args);
+ };
+
+ /**
+ * Returns a new argArray with any test-specific arguments added.
+   * Appends any test-specific arguments to argArray and returns it.
+ function appendSetParameterArgs(argArray) {
+ var programName = argArray[0];
+ if (programName.endsWith('mongod') || programName.endsWith('mongos')) {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push('--setParameter', "enableTestCommands=1");
+ }
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism !== "SCRAM-SHA-1") {
+ var hasAuthMechs = false;
+ for (i in argArray) {
+ if (typeof argArray[i] === 'string' &&
+ argArray[i].indexOf('authenticationMechanisms') !== -1) {
+ hasAuthMechs = true;
+ break;
+ }
+ }
+ if (!hasAuthMechs) {
+ argArray.push('--setParameter', "authenticationMechanisms=" + jsTest.options().authMechanism);
+ }
+ }
+ if (jsTest.options().auth) {
+ argArray.push('--setParameter', "enableLocalhostAuthBypass=false");
+ }
+ if (jsTestOptions().useSSL) {
+ if (argArray.indexOf('--sslMode') < 0) {
+ argArray.push(
+ '--sslMode', 'requireSSL',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation');
+ }
+ }
+
+ if (programName.endsWith('mongos')) {
+ // mongos only options
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ var params = jsTest.options().setParametersMongos.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) {
+ argArray.push('--setParameter', p);
+ }
+ });
+ }
+ }
+ } else if (programName.endsWith('mongod')) {
+ // mongod only options
+ // set storageEngine for mongod
+ if (jsTest.options().storageEngine) {
+ if (argArray.indexOf("--storageEngine") < 0) {
+ argArray.push('--storageEngine', jsTest.options().storageEngine);
+ }
+ }
+ if (jsTest.options().wiredTigerEngineConfigString) {
+ argArray.push('--wiredTigerEngineConfigString', jsTest.options().wiredTigerEngineConfigString);
+ }
+ if (jsTest.options().wiredTigerCollectionConfigString) {
+ argArray.push('--wiredTigerCollectionConfigString', jsTest.options().wiredTigerCollectionConfigString);
+ }
+ if (jsTest.options().wiredTigerIndexConfigString) {
+ argArray.push('--wiredTigerIndexConfigString', jsTest.options().wiredTigerIndexConfigString);
+ }
+ // apply setParameters for mongod
+ if (jsTest.options().setParameters) {
+ params = jsTest.options().setParameters.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) {
+ argArray.push('--setParameter', p);
+ }
+ });
+ }
+ }
+ }
+ }
+ return argArray;
+ }
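+
+  // Illustrative sketch (assuming enableTestCommands and auth are set in the
+  // test options; the exact additions depend on jsTest.options()):
+  //   appendSetParameterArgs(["mongod", "--port", "27017"])
+  //   // => ["mongod", "--port", "27017",
+  //   //     "--setParameter", "enableTestCommands=1",
+  //   //     "--setParameter", "enableLocalhostAuthBypass=false"]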
+
+ /**
+ * Start a mongo process with a particular argument array. If we aren't waiting for connect,
+ * return null.
+ */
+ MongoRunner.startWithArgs = function(argArray, waitForConnect) {
+ // TODO: Make there only be one codepath for starting mongo processes
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = _startMongoProgram.apply(null, argArray);
+
+ var conn = null;
+ if (waitForConnect) {
+ assert.soon(function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch (e) {
+ if (!checkProgram(pid)) {
+ print("Could not start mongo program at " + port + ", process ended");
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+ }
+
+ return conn;
+ };
+
+ /**
+ * DEPRECATED
+ *
+   * Start mongod or mongos and return a Mongo() object connected to it.
+   * This function's first argument is the "mongod" or "mongos" program name,
+   * and subsequent arguments to this function are passed as command line
+   * arguments to the program.
+ */
+ startMongoProgram = function() {
+ var port = _parsePort.apply(null, arguments);
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply(null, args);
+
+ var m;
+ assert.soon(function() {
+ try {
+ m = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch (e) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended");
+
+ // Break out
+ m = null;
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return m;
+ };
+
+ runMongoProgram = function() {
+ var args = Array.from(arguments);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationDatabase=admin'
+ );
+ }
+
+ if (jsTestOptions().useSSL) {
+ args.push("--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames");
+ }
+
+ if (progName === 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _runMongoProgram.apply(null, args);
+ };
+
+ // Start a mongo program instance. This function's first argument is the
+ // program name, and subsequent arguments to this function are passed as
+ // command line arguments to the program. Returns pid of the spawned program.
+ startMongoProgramNoConnect = function() {
+ var args = Array.from(arguments);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
+
+ if (jsTestOptions().useSSL) {
+ args.push("--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames");
+ }
+
+ if (progName === 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _startMongoProgram.apply(null, args);
+ };
+
+ myPort = function() {
+ var m = db.getMongo();
+ if (m.host.match(/:/)) {
+ return m.host.match(/:(.*)/)[1];
+ }
+ return 27017;
+ };
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers_misc.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers_misc.js
new file mode 100644
index 00000000000..18c290967b3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/servers_misc.js
@@ -0,0 +1,376 @@
+
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on; use allocatePorts(num) to requisition
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options include no_bind to not bind_ip to 127.0.0.1
+ * (necessary for replica set testing)
+ */
+MongodRunner = function(port, dbpath, peer, arbiter, extraArgs, options) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer;
+ this.arbiter_ = arbiter;
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ */
+MongodRunner.prototype.start = function(reuseData) {
+ var args = [];
+ if (reuseData) {
+ args.push("mongod");
+ }
+ args.push(
+ "--port", this.port_,
+ "--dbpath", this.dbpath_,
+ "--noprealloc",
+ "--smallfiles");
+ if (!this.options_.no_bind) {
+ args.push("--bind_ip", "127.0.0.1");
+ }
+ if (this.extraArgs_) {
+ args = args.concat(this.extraArgs_);
+ }
+ removeFile(this.dbpath_ + "/mongod.lock");
+ if (reuseData) {
+ return startMongoProgram.apply(null, args);
+ }
+ return startMongod.apply(null, args);
+};
+
+MongodRunner.prototype.port = function() {
+ return this.port_;
+};
+
+MongodRunner.prototype.toString = function() {
+ return [this.port_, this.dbpath_, this.peer_, this.arbiter_].toString();
+};
+
+ToolTest = function(name, extraOptions) {
+ this.useSSL = jsTestOptions().useSSL;
+ this.name = name;
+ this.options = extraOptions;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = MongoRunner.dataPath + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ resetDbpath(this.dbpath);
+ resetDbpath(this.ext);
+};
+
+ToolTest.prototype.startDB = function(coll) {
+ assert(!this.m, "db already running");
+
+ var options = {
+ port: this.port,
+ dbpath: this.dbpath,
+ noprealloc: "",
+ smallfiles: "",
+ bind_ip: "127.0.0.1",
+ };
+
+ Object.extend(options, this.options);
+
+ if (this.useSSL) {
+ Object.extend(options, {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslWeakCertificateValidation: "",
+ });
+ }
+
+ this.m = startMongoProgram.apply(null, MongoRunner.arrOptions("mongod", options));
+ this.db = this.m.getDB(this.baseName);
+ if (coll) {
+ return this.db.getCollection(coll);
+ }
+ return this.db;
+};
+
+ToolTest.prototype.stop = function() {
+ if (!this.m) {
+ return;
+ }
+ MongoRunner.stopMongod(this.port);
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+};
+
+ToolTest.prototype.runTool = function() {
+ var a = ["mongo" + arguments[0]];
+
+ var hasdbpath = false;
+
+ for (var i=1; i<arguments.length; i++) {
+ a.push(arguments[i]);
+ if (arguments[i] === "--dbpath") {
+ hasdbpath = true;
+ }
+ }
+
+ if (this.useSSL) {
+ a = a.concat(["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/server.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames"]);
+ }
+
+ if (!hasdbpath) {
+ a.push("--host");
+ a.push("127.0.0.1:" + this.port);
+ }
+
+ return runMongoProgram.apply(null, a);
+};
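+
+// Example (a hypothetical usage sketch): runTool prepends "mongo" to its first
+// argument, so 'dump' invokes mongodump against the running test instance.
+//   var toolTest = new ToolTest('example');
+//   var coll = toolTest.startDB('foo');
+//   coll.insert({a: 1});
+//   toolTest.runTool('dump', '--out', 'example_dump');
+//   toolTest.stop();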
+
+
+ReplTest = function(name, ports) {
+ this.name = name;
+ this.ports = ports || allocatePorts(2);
+};
+
+ReplTest.prototype.getPort = function(master) {
+ if (master) {
+ return this.ports[0];
+ }
+ return this.ports[1];
+};
+
+ReplTest.prototype.getPath = function(master) {
+ var p = MongoRunner.dataPath + this.name + "-";
+ if (master) {
+ p += "master";
+ } else {
+ p += "slave";
+ }
+ return p;
+};
+
+ReplTest.prototype.getOptions = function(master, extra, putBinaryFirst, norepl) {
+ if (!extra) {
+ extra = {};
+ }
+
+ if (!extra.oplogSize) {
+ extra.oplogSize = "40";
+ }
+
+ var a = [];
+ if (putBinaryFirst) {
+ a.push("mongod");
+ }
+ a.push("--noprealloc",
+ "--bind_ip", "127.0.0.1",
+ "--smallfiles",
+ "--port", this.getPort(master),
+ "--dbpath", this.getPath(master));
+
+ if (jsTestOptions().noJournal) {
+ a.push("--nojournal");
+ }
+ if (jsTestOptions().noJournalPrealloc) {
+ a.push("--nopreallocj");
+ }
+ if (jsTestOptions().keyFile) {
+ a.push("--keyFile", jsTestOptions().keyFile);
+ }
+
+ if (jsTestOptions().useSSL) {
+ if (!Array.contains(a, "--sslMode")) {
+ a.push("--sslMode", "requireSSL");
+ }
+ if (!Array.contains(a, "--sslPEMKeyFile")) {
+ a.push("--sslPEMKeyFile", "jstests/libs/server.pem");
+ }
+ if (!Array.contains(a, "--sslCAFile")) {
+ a.push("--sslCAFile", "jstests/libs/ca.pem");
+ }
+ a.push("--sslWeakCertificateValidation");
+ }
+ if (jsTestOptions().useX509 && !Array.contains(a, "--clusterAuthMode")) {
+ a.push("--clusterAuthMode", "x509");
+ }
+
+ if (!norepl) {
+ if (master) {
+ a.push("--master");
+ } else {
+ a.push("--slave", "--source", "127.0.0.1:" + this.ports[0]);
+ }
+ }
+
+ for (var k in extra) {
+ if (!extra.hasOwnProperty(k)) {
+ continue;
+ }
+ var v = extra[k];
+ if (k in MongoRunner.logicalOptions) {
+ continue;
+ }
+ a.push("--" + k);
+ if (v !== undefined && v !== null && v !== "") {
+ a.push(v);
+ }
+ }
+
+ return a;
+};
+
+ReplTest.prototype.start = function(master, options, restart, norepl) {
+ var lockFile = this.getPath(master) + "/mongod.lock";
+ removeFile(lockFile);
+ var o = this.getOptions(master, options, restart, norepl);
+
+ if (restart) {
+ var conn = startMongoProgram.apply(null, o);
+ if (!master) {
+ conn.setSlaveOk();
+ }
+ return conn;
+ }
+ conn = startMongod.apply(null, o);
+ if (jsTestOptions().keyFile || jsTestOptions().auth || jsTestOptions().useX509) {
+ jsTest.authenticate(conn);
+ }
+ if (!master) {
+ conn.setSlaveOk();
+ }
+ return conn;
+};
+
+ReplTest.prototype.stop = function(master, signal) {
+ if (arguments.length === 0) {
+ this.stop(true);
+ this.stop(false);
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return MongoRunner.stopMongod(this.getPort(master), signal || 15);
+};
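+
+// Example (a hypothetical sketch): start a master/slave pair, then stop both.
+//   var rt = new ReplTest('repl_example');
+//   var master = rt.start(true);
+//   var slave = rt.start(false);
+//   rt.stop(); // with no arguments, stops the master and the slave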
+
+if (typeof allocatePort === 'function') {
+ allocatePorts = function (numPorts) {
+ var ports = [];
+ for (var i = 0; i < numPorts; i++) {
+ ports.push(allocatePort());
+ }
+ return ports;
+ };
+} else {
+ allocatePorts = function(n, startPort) {
+ var ret = [];
+ var start = startPort || 31000;
+ for (var i = start; i < start + n; ++i) {
+ ret.push(i);
+ }
+ return ret;
+ };
+}
+
+
+SyncCCTest = function(testName, extraMongodOptions) {
+ this._testName = testName;
+ this._connections = [];
+
+ for (var i=0; i<3; i++) {
+ this._connections.push(startMongodTest(30000 + i, testName + i, false, extraMongodOptions));
+ }
+
+ this.url = this._connections.map(function(z) {
+ return z.name;
+ }).join(",");
+ this.conn = new Mongo(this.url);
+};
+
+SyncCCTest.prototype.stop = function() {
+ for (var i=0; i<this._connections.length; i++) {
+ MongoRunner.stopMongod(30000 + i);
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+};
+
+SyncCCTest.prototype.checkHashes = function(dbname, msg) {
+ var hashes = this._connections.map(function(z) {
+ return z.getDB(dbname).runCommand("dbhash");
+ });
+
+ for (var i=1; i<hashes.length; i++) {
+ assert.eq(hashes[0].md5, hashes[i].md5, "checkHash on " + dbname + " " + msg + "\n" + tojson(hashes));
+ }
+};
+
+SyncCCTest.prototype.tempKill = function(num) {
+ num = num || 0;
+ MongoRunner.stopMongod(30000 + num);
+};
+
+SyncCCTest.prototype.tempStart = function(num) {
+ num = num || 0;
+ this._connections[num] = startMongodTest(30000 + num, this._testName + num, true);
+};
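+
+// Example (a hypothetical sketch): run a three-node sync cluster, check that
+// all members agree on a database's contents, then shut it down.
+//   var sync = new SyncCCTest('sync_example');
+//   sync.checkHashes('config', 'initial state');
+//   sync.stop();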
+
+
+function startParallelShell(jsCode, port, noConnect) {
+ var x;
+
+ var args = ["mongo"];
+
+ // Convert function into call-string
+ if (typeof (jsCode) === "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ } else if (typeof (jsCode) === "string") {
+ // do nothing
+ } else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
+ if (noConnect) {
+ args.push("--nodb");
+ } else if (typeof (db) === "object") {
+ jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
+ }
+
+ if (TestData) {
+ jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ if (typeof db === "object") {
+ var hostAndPort = db.getMongo().host.split(':');
+ var host = hostAndPort[0];
+ args.push("--host", host);
+ if (!port && hostAndPort.length >= 2) {
+ port = hostAndPort[1];
+ }
+ }
+ if (port) {
+ args.push("--port", port);
+ }
+
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function() {
+ return waitProgram(x);
+ };
+}
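+
+// Example (a hypothetical sketch): run a function in a parallel shell against
+// the current db, then block until the spawned shell exits.
+//   var join = startParallelShell(function() {
+//     db.parallel_example.insert({x: 1});
+//   });
+//   join();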
+
+var testingReplication = false;
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..52d3934a545
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,25 @@
+
+SlowWeeklyMongod = function(name) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty(
+ "--port", this.port,
+ "--dbpath", MongoRunner.dataPath + this.name,
+ "--smallfiles",
+ "--nojournal");
+};
+
+SlowWeeklyMongod.prototype.getDB = function(name) {
+ return this.conn.getDB(name);
+};
+
+SlowWeeklyMongod.prototype.stop = function() {
+ stopMongod(this.port);
+ var end = new Date();
+ print("slowWeekly test: " + this.name + " completed successfully in "
+ + ((end.getTime() - this.start.getTime()) / 1000) + " seconds");
+};
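+
+// Example (a hypothetical sketch): time a long-running test against a
+// dedicated mongod.
+//   var swm = new SlowWeeklyMongod('slow_example');
+//   swm.getDB('test').foo.insert({x: 1});
+//   swm.stop(); // prints the elapsed time for the run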
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/smoke.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..7dddf222386
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/smoke.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIBCDANBgkqhkiG9w0BAQUFADBrMQ4wDAYDVQQDEwVzbW9r
+ZTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1O
+ZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwHhcN
+MTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBrMQ4wDAYDVQQDEwVzbW9rZTEP
+MA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcg
+WW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb4fOWDomCPyYesh42pQ/bEHdK7r73
+06x1hdku9i+nytCSxhhuITGC1FA4ZIbYdQC/jgfzC0D+SDFKCCyNZA/2Pxam9y3F
+QHrueNtD9bw/OB98D6hC2fCow5OxUqWDkee2hQRTwLKDzec+H72AkwURh8oTfJsl
+LL/1YITZs9kfs59r8HG2YAT7QBbg3xBmK0wZvL4V/FY/OeeR92pIgjUU/6xm/1LU
+bhNHl5JTrXQxPpmvDb1ysiI0mMLeUz7UI+Pe/9mn91dHwgkprWyFi6VnV3/aW7DC
+nW/DklOPD8vMWu2A6iYU0fZbcj4vGM607vst5QLDMoD5Y2ilrKLiTRa5AgMBAAGj
+EDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJc64d76+eyNGX6C
+5r4IdFF3zJjkLs/NcSMReUTEv4zAdJCn7c1FNRkQBS3Ky2CeSGmiyYOhWZ7usv7x
+EvprmHouWsrQXV+o5EIW366e5wzg0c5KWO3oBIRjx4hDkRSQSjJjy5NFrc8fAW9x
+eeaHFWdqk3CHvqBhd32QYEs4+7v8hBYM3PBkj8qghXta4ZZS89cTMSjhu5s4Opje
+qUzGzoHat2VBdYzIpVOorYMFXObwCeQkCAXO5epuGZ0QhML66hc7FuOsW75kI9aW
+QXVoM/z2Gb1wbBYnwHOXtClK783S3RdV0uJun/pVj+VeHb6fyIQRmC5d0eJ0C8mY
+X+acnvA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA2+Hzlg6Jgj8mHrIeNqUP2xB3Su6+99OsdYXZLvYvp8rQksYY
+biExgtRQOGSG2HUAv44H8wtA/kgxSggsjWQP9j8WpvctxUB67njbQ/W8PzgffA+o
+QtnwqMOTsVKlg5HntoUEU8Cyg83nPh+9gJMFEYfKE3ybJSy/9WCE2bPZH7Ofa/Bx
+tmAE+0AW4N8QZitMGby+FfxWPznnkfdqSII1FP+sZv9S1G4TR5eSU610MT6Zrw29
+crIiNJjC3lM+1CPj3v/Zp/dXR8IJKa1shYulZ1d/2luwwp1vw5JTjw/LzFrtgOom
+FNH2W3I+LxjOtO77LeUCwzKA+WNopayi4k0WuQIDAQABAoIBAQDRFgAaDcLGfqQS
+Bk/iqHz2U6cMMxCW+sqAioGmPWW9iYdiOkra1meNP7T0mur7A+9tN3LpsybfZeiw
+vCsZXDAteXph1KPKcPE0uOnPqumRuB2ATCc1Qqas5CUaNju7a8/J6Jzfw1o9KVud
+4HLDw4nLTLNkalXhOLdkbp6FoZZypAgc8OnSdw7z9Kri6VndkddX3fWv4t203XwT
+AvBxvy4Qfblz6VKYRnjj2CPvo/kD+ncFEg+S6u8/LkghTX7CYeMHdTC0P9jOcEK2
+PMm3kS3sX7VkypsAirYK5QtBWxur+mINxfOBDtRlA2RaJQnikRiGb14bMkLx8Liy
+JNjEHSLdAoGBAP9+KpjniozZIbrcS79wdRrW+ARyDp1Plzyd4nQxfWmQ//nsnK5T
+EYCFXWTR/ldkAoHpD+bGGU02p1+1u4vmWqw/x+Qy56Gh/eylhe0RvYEjkVLyreuc
+bXu0BFlKVgRlBq1ZyXnr2lz3bAIZxvZs13lZn6qVPMt7w2/JTCal9jw7AoGBANxR
+sGik9mq/678nzLiNlf/LcwIz7siuyISoWDOaVEVva0uorqctVqL95w0f+3FXqBO/
+5BiJRFo5D8SfzRjkNkJ7V+rm+7/CjtsjEw2Ue+ZJYPlm+Wr545GYmhU9QH9NLZIN
+JBwTVWjLgdsyQyi0Gc+xMraBwEwoyS8cO17uHO2bAoGBANRmO91/6BPt0ve4epR9
+Vi1o9yki9PlcmHtBOmikWAFyFQvd4+eckVlKBflyBkL6locPjTOqDpC9VengeDj2
+2PyHzZLtqtkZhbK9bJhIfkWknwTZUTMliXMkldTxUo82uZVVpoRgSdmtq7IXYeut
+UnjExFMY3EDB9BizvUYIBKvPAoGAViQ6bS/SiPpxGlRdXus88r6BQSM9AYoVLIkF
+s2dr+5oMwZA6eXLopOHRLPiMP0yekto8PLuu1ffpil9QuaLA9E11moqlc9yGLngQ
+QwcDSo72M41nh8Qcjhi0ZgmE5kEuyCQLMk783fRz2VhVmdyRGvuVcHZa0WxA/QJ0
+1DEVbnECgYEA3i2PGHUvU2TIFNvubw3qdH5y7FXafF+O0ulQ8e6r/CbVAG14Z6xP
+RHLc7/JIYK9CG1PWCbkjiHZ4MsKFuRWFrUMrwSj8M3euCaEIxa/Co60qQ/CnZiZ6
+geleTtUcTZ2T0pqGLnrHwlzhLpCkPJPyjcfQjjEZRwd0bVFX6b3C/rw=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/test_background_ops.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..08c12cb90aa
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/test_background_ops.js
@@ -0,0 +1,334 @@
+//
+// Utilities for coordinating background operations while other test operations are running
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function(mongo, name) {
+ var ts = new ObjectId();
+ var lockColl = mongo.getCollection("config.testLocks");
+
+ lockColl.update({_id: name, state: 0}, {$set: {state: 0}}, true);
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime();
+
+ assert.soon(function() {
+ lockColl.update({_id: name, state: 0}, {$set: {ts: ts, state: 1}});
+ var gleObj = lockColl.getDB().getLastErrorObj();
+
+ if (new Date().getTime() - startTime > 20 * 1000) {
+ print("Waiting for...");
+ printjson(gleObj);
+ printjson(lockColl.findOne());
+ printjson(ts);
+ }
+
+ return gleObj.n === 1 || gleObj.updatedExisting;
+ }, "could not acquire lock", 30 * 1000, 100);
+
+ print("Acquired lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+
+ // Set the state back to 0
+ var unlock = function() {
+ print("Releasing lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+ lockColl.update({_id: name, ts: ts}, {$set: {state: 0}});
+ };
+
+ // Return an object we can invoke unlock on
+ return {unlock: unlock};
+};
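+
+// Example (a hypothetical sketch): guard a critical section shared with a
+// background op.
+//   var lock = waitForLock(db.getMongo(), 'example-lock');
+//   try {
+//     // exclusive work goes here
+//   } finally {
+//     lock.unlock();
+//   }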
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function(mongo, name, finished) {
+ if (finished || finished === undefined || finished === null) {
+ mongo.getCollection("config.testFinished").update({_id: name}, {_id: name}, true);
+ } else {
+ mongo.getCollection("config.testFinished").remove({_id: name});
+ }
+};
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function(mongo, name) {
+ return mongo.getCollection("config.testFinished").findOne({_id: name}) !== null;
+};
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function(mongo, name, result, err) {
+ mongo.getCollection("config.testResult").update({_id: name}, {_id: name, result: result, err: err}, true);
+};
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function(mongo, name) {
+ return mongo.getCollection("config.testResult").findOne({_id: name});
+};
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell(jsCode, port) {
+ var x;
+ if (port) {
+ x = startMongoProgramNoConnect("mongo", "--port", port, "--eval", jsCode);
+ } else {
+ x = startMongoProgramNoConnect("mongo", "--eval", jsCode, db ? db.getMongo().host : null);
+ }
+
+ return function() {
+ jsTestLog("Waiting for shell " + x + "...");
+ waitProgram(x);
+ jsTestLog("Shell " + x + " finished.");
+ };
+}
+
+var RandomFunctionContext = function(context) {
+ Random.srand(context.seed);
+ Random.randBool = function() {
+ return Random.rand() > 0.5;
+ };
+
+ Random.randInt = function(min, max) {
+ if (max === undefined) {
+ max = min;
+ min = 0;
+ }
+ return min + Math.floor(Random.rand() * max);
+ };
+
+ Random.randShardKey = function() {
+ var numFields = 2; // Random.randInt(1, 3)
+ var key = {};
+ for (var i = 0; i < numFields; i++) {
+ var field = String.fromCharCode("a".charCodeAt() + i);
+ key[field] = 1;
+ }
+ return key;
+ };
+
+ Random.randShardKeyValue = function(shardKey) {
+ var keyValue = {};
+ for (var field in shardKey) {
+ if (!shardKey.hasOwnProperty(field)) {
+ continue;
+ }
+ keyValue[field] = Random.randInt(1, 100);
+ }
+ return keyValue;
+ };
+
+ Random.randCluster = function() {
+ var numShards = 2; // Random.randInt( 1, 10 )
+ var rs = false; // Random.randBool()
+ var st = new ShardingTest({
+ shards: numShards,
+ mongos: 4,
+ other: {separateConfig: true, rs: rs}
+ });
+ return st;
+ };
+};
+
+
+startParallelOps = function(mongo, proc, args, context) {
+ var procName = proc.name + "-" + new ObjectId();
+ var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
+ .getTimestamp().getTime();
+
+ // Make sure we aren't finished before we start
+ setFinished(mongo, procName, false);
+ setResult(mongo, procName, undefined, undefined);
+
+ // TODO: Make this a context of its own
+ var procContext = {
+ procName: procName,
+ seed: seed,
+ waitForLock: waitForLock,
+ setFinished: setFinished,
+ isFinished: isFinished,
+ setResult: setResult,
+ setup: function(context, stored) {
+ waitForLock = function() {
+ return context.waitForLock(db.getMongo(), context.procName);
+ };
+ setFinished = function(finished) {
+ return context.setFinished(db.getMongo(), context.procName, finished);
+ };
+ isFinished = function() {
+ return context.isFinished(db.getMongo(), context.procName);
+ };
+ setResult = function(result, err) {
+ return context.setResult(db.getMongo(), context.procName, result, err);
+ };
+ },
+ };
+
+ var bootstrapper = function(stored) {
+ var procContext = stored.procContext;
+ procContext.setup(procContext, stored);
+
+ var contexts = stored.contexts;
+ eval("contexts = " + contexts); // eslint-disable-line no-eval
+
+ for (var i = 0; i < contexts.length; i++) {
+ if (typeof (contexts[i]) !== "undefined") {
+ // Evaluate all contexts
+ contexts[i](procContext);
+ }
+ }
+
+ var operation = stored.operation;
+ eval("operation = " + operation); // eslint-disable-line no-eval
+
+ var args = stored.args;
+ eval("args = " + args); // eslint-disable-line no-eval
+
+ var result;
+ var err;
+
+ try {
+ result = operation.apply(null, args);
+ } catch (e) {
+ err = e;
+ }
+
+ setResult(result, err);
+ };
+
+ var contexts = [RandomFunctionContext, context];
+
+ var testDataColl = mongo.getCollection("config.parallelTest");
+
+ testDataColl.insert({
+ _id: procName,
+ bootstrapper: tojson(bootstrapper),
+ operation: tojson(proc),
+ args: tojson(args),
+ procContext: procContext,
+ contexts: tojson(contexts),
+ });
+
+ assert.eq(null, testDataColl.getDB().getLastError());
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}";
+
+
+ var oldDB = db;
+ db = mongo.getDB("test"); // eslint-disable-line no-native-reassign
+
+ jsTest.log("Starting " + proc.name + " operations...");
+
+ var rawJoin = startParallelShell(bootstrapStartup);
+
+ db = oldDB; // eslint-disable-line no-native-reassign
+
+
+ var join = function() {
+ setFinished(mongo, procName, true);
+
+ rawJoin();
+ var result = getResult(mongo, procName);
+
+ assert.neq(result, null);
+
+ if (result.err) {
+ throw Error("Error in parallel ops " + procName + " : "
+ + tojson(result.err));
+ }
+ return result.result;
+ };
+
+ join.isFinished = function() {
+ return isFinished(mongo, procName);
+ };
+
+ join.setFinished = function(finished) {
+ return setFinished(mongo, procName, finished);
+ };
+
+ join.waitForLock = function(name) {
+ return waitForLock(mongo, name);
+ };
+
+ return join;
+};
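+
+// Example (a hypothetical sketch): run moveOps (defined below) in the
+// background against a sharded collection, then join once the foreground test
+// is done. Here st is assumed to be a ShardingTest with 'foo.bar' sharded.
+//   var join = startParallelOps(st.s, moveOps, ['foo.bar', {}]);
+//   // ... foreground test operations ...
+//   join(); // flags the op as finished and waits for the background shell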
+
+
+//
+// Some utility operations
+//
+
+function moveOps(collName, options) {
+ options = options || {};
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
+ var shards = config.shards.find().toArray();
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var findKey = Random.randShardKeyValue(shardKey);
+ var toShard = shards[Random.randInt(shards.length)]._id;
+
+ try {
+ printjson(admin.runCommand({
+ moveChunk: collName,
+ find: findKey,
+ to: toShard,
+ }));
+ } catch (e) {
+ printjson(e);
+ }
+
+ sleep(1000);
+ }
+
+ jsTest.log("Stopping moveOps...");
+}
+
+function splitOps(collName, options) {
+ options = options || {};
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
+ var shards = config.shards.find().toArray();
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var middleKey = Random.randShardKeyValue(shardKey);
+
+ try {
+ printjson(admin.runCommand({
+ split: collName,
+ middle: middleKey,
+ }));
+ } catch (e) {
+ printjson(e);
+ }
+
+ sleep(1000);
+ }
+
+ jsTest.log("Stopping splitOps...");
+}
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/testconfig b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/testconfig
new file mode 100644
index 00000000000..4b09f37ad13
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/testconfig
@@ -0,0 +1,6 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
+help = false
+sysinfo = false
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..285d9afcdcd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,102 @@
+
+//
+// On an error inserting documents, traces back through the shard oplogs and shows where the document was dropped
+//
+
+function traceMissingDoc(coll, doc, mongos) {
+ if (mongos) {
+ coll = mongos.getCollection(String(coll));
+ } else {
+ mongos = coll.getMongo();
+ }
+
+ var config = mongos.getDB("config");
+ var shards = config.shards.find().toArray();
+ for (var i = 0; i < shards.length; i++) {
+ shards[i].conn = new Mongo(shards[i].host);
+ }
+
+ var shardKeyPatt = config.collections.findOne({_id: String(coll)}).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for (var k in shardKeyPatt) {
+ if (!shardKeyPatt.hasOwnProperty(k)) {
+ continue;
+ }
+ if (doc[k] === undefined || doc[k] === null) {
+ jsTest.log("Shard key " + tojson(shardKey)
+ + " not found in doc " + tojson(doc)
+ + ", falling back to _id search...");
+ shardKeyPatt = {_id: 1};
+ shardKey = {_id: doc['_id']};
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if (doc['_id'] === undefined) {
+ jsTest.log("Id not found in doc " + tojson(doc) + " cannot trace oplog entries.");
+ return;
+ }
+
+ jsTest.log("Using shard key : " + tojson(shardKey));
+
+ var allOps = [];
+ for (i = 0; i < shards.length; i++) {
+ var oplog = shards[i].conn.getCollection("local.oplog.rs");
+ if (!oplog.findOne()) {
+ oplog = shards[i].conn.getCollection("local.oplog.$main");
+ }
+
+ if (!oplog.findOne()) {
+ jsTest.log("No oplog was found on shard " + shards[i]._id);
+ continue;
+ }
+
+ var addKeyQuery = function(query, prefix) {
+ for (var k in shardKey) { // eslint-disable-line guard-for-in
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function(cursor) { // eslint-disable-line no-loop-func
+ cursor.forEach(function(doc) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date(doc.ts.getTime() * 1000);
+ allOps.push(doc);
+ });
+ };
+
+ // Find ops
+ addToOps(oplog.find(addKeyQuery({op: 'i'}, 'o')));
+ var updateQuery = {
+ $or: [
+ addKeyQuery({op: 'u'}, 'o2'),
+ {op: 'u', 'o2._id': doc['_id']},
+ ],
+ };
+ addToOps(oplog.find(updateQuery));
+ addToOps(oplog.find({op: 'd', 'o._id': doc['_id']}));
+ }
+
+ var compareOps = function(opA, opB) {
+ if (opA.ts < opB.ts) {
+ return -1;
+ }
+ if (opB.ts < opA.ts) {
+ return 1;
+ }
+ return 0;
+ };
+
+ allOps.sort(compareOps);
+
+ print("Ops found for doc " + tojson(doc) + " on each shard:\n");
+ for (i = 0; i < allOps.length; i++) {
+ printjson(allOps[i]);
+ }
+
+ return allOps;
+}
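+
+// Example (a hypothetical sketch): after a document goes missing during a
+// migration test, trace its oplog history across all shards via a mongos.
+//   var ops = traceMissingDoc(mongos.getCollection('foo.bar'), {a: 1, _id: 5});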
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-ca.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-ca.pem
new file mode 100644
index 00000000000..2a0e139e184
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-ca.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDpjCCAo6gAwIBAgIDAghHMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy
+dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE0NTY1NVoXDTM2MDMzMTE0NTY1NVow
+fDEfMB0GA1UEAxMWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2Vy
+bmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREw
+DwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQCePFHZTydC96SlSHSyu73vw//ddaE33kPllBB9DP2L7yRF
+6D/blFmno9fSM+Dfg64VfGV+0pCXPIZbpH29nzJu0DkvHzKiWK7P1zUj8rAHaX++
+d6k0yeTLFM9v+7YE9rHoANVn22aOyDvTgAyMmA0CLn+SmUy6WObwMIf9cZn97Znd
+lww7IeFNyK8sWtfsVN4yRBnjr7kKN2Qo0QmWeFa7jxVQptMJQrY8k1PcyVUOgOjQ
+ocJLbWLlm9k0/OMEQSwQHJ+d9weUbKjlZ9ExOrm4QuuA2tJhb38baTdAYw3Jui4f
+yD6iBAGD0Jkpc+3YaWv6CBmK8NEFkYJD/gn+lJ75AgMBAAGjMTAvMAwGA1UdEwQF
+MAMBAf8wHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcN
+AQEFBQADggEBADYikjB6iwAUs6sglwkE4rOkeMkJdRCNwK/5LpFJTWrDjBvBQCdA
+Y5hlAVq8PfIYeh+wEuSvsEHXmx7W29X2+p4VuJ95/xBA6NLapwtzuiijRj2RBAOG
+1EGuyFQUPTL27DR3+tfayNykDclsVDNN8+l7nt56j8HojP74P5OMHtn+6HX5+mtF
+FfZMTy0mWguCsMOkZvjAskm6s4U5gEC8pYEoC0ZRbfUdyYsxZe/nrXIFguVlVPCB
+XnfB/0iG9t+VH5cUVj1LP9skXTW4kXfhQmljUuo+EVBNR6n2nfTnpoC65WeAgHV4
+V+s9mJsUv2x72KtKYypqEVT0gaJ1WIN9N1s=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-client.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-client.pem
new file mode 100644
index 00000000000..dec32375c1b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-client.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDnTCCAoWgAwIBAgIDA1clMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy
+dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE2MDY0OVoXDTM2MDMzMTE2MDY0OVow
+gYAxIzAhBgNVBAMTGlRydXN0ZWQgS2VybmVsIFRlc3QgQ2xpZW50MQ8wDQYDVQQL
+EwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENp
+dHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKI9cGBnH5wcthvFT1FdfQTw1EvOgtfBHVEMRFZH
+bupMnAqP69id0bf7SdBWzx4A1f1ws1RkeL5ot2u5T9NwsFzGvRBQ5onFtDnC3eKB
+OwapCk2B82mlx4xZBjewg+NbxoRJBUWGqB0LykaVUHxM6BGgwExNAyXQ9syPSyNZ
+NIr+zDrLdTfjKklmDkv9jSCB/T3t80kQPY+04u98buUe7wGM0WQFbVNoYrSkZ6Ja
+O+G8bpXP4hXIXsxOHucjBeJc1KR+lxEMw3wInZ2KjjMv7HsFIIOQg5pkMDXibSU6
+cNUZTA2MrzZ+t7TeAQyOTzfGlaatfvJYxU7v4u0W5jxeV60CAwEAAaMjMCEwHwYD
+VR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcNAQEFBQADggEB
+AHI6Rfq/UqAoPxiz5bqby2FnGOrskotgXhn1JkZLGCfllx5WgMuLnu1bvjoym567
+HySqAXQOqEWm6XRU7SVOA+69e4OLWX+HSKjFRuG5Ip67UpihZMgyLKuGRBwfnbXj
+14o+xbWjXCgVZEI7vzT7q/7/W1mXj680fHs93Zog561Id4Tf3DYkOoMawSfeF4iu
+8hcYjlJYjFb3ZvM1wokicmEwtY0+YbBGVo8xh5jYdfCLzYLxc3CpP5eXJtMvGE/x
+RnyiY3f7hkUZMibnREPS6kpQVEh36DT21C0OB8s7TcMU7yMKgVdqL1udmEkiKXTj
+H7v/s+7d54O0tr5+IysCAoA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAoj1wYGcfnBy2G8VPUV19BPDUS86C18EdUQxEVkdu6kycCo/r
+2J3Rt/tJ0FbPHgDV/XCzVGR4vmi3a7lP03CwXMa9EFDmicW0OcLd4oE7BqkKTYHz
+aaXHjFkGN7CD41vGhEkFRYaoHQvKRpVQfEzoEaDATE0DJdD2zI9LI1k0iv7MOst1
+N+MqSWYOS/2NIIH9Pe3zSRA9j7Ti73xu5R7vAYzRZAVtU2hitKRnolo74bxulc/i
+FchezE4e5yMF4lzUpH6XEQzDfAidnYqOMy/sewUgg5CDmmQwNeJtJTpw1RlMDYyv
+Nn63tN4BDI5PN8aVpq1+8ljFTu/i7RbmPF5XrQIDAQABAoIBAGg9iYKnP4wSdn+J
+WtkwdC9EfWLnoPH3RlrYwt+crgskhe3TYvmfDSxk7JxL6m+god1hGBfVJi9RIOi5
+/Cwib25s0vU0xasnuBCUv/PUjJRO8Cu0nyz2Myxd1rzZUSQ3x2kfcZ+mUUW4WZLY
+RQpYb5ND8coUgT0+8hOkzeY8XqIe5c4VrX16mA+uoIMsr4QHxe0pl59oY57V3Der
++gsaGuWZ5hDvfuoCOx03Cuc1pTx0T8ZHdliu/xe+np3ETFdQ/1cyJMAJW3w15qKt
+L6AfkeRaMAgqxs2zU1rgPdJddRS7MaSJnpDtMjeyJpeNCMDJ/h3ihgSM1SM1QCtY
+tcnWdIECgYEA1/SVGV9McAvtVtQA1D7kPEo0ifCG8frUfGy2yK9aasFHhLS3JXXY
+4R0Fy/pOwdKNtnN3ZKd0Y0wcmlwPepg2HKlUFJdjqEKZctMzOscy3n08F4AV2rLc
+48q2XLLIQNN/JuqcaeRgQByvP6YL0YuqqsAPiRhTeYgJxp4c+JgbmDUCgYEAwFL7
+jzYwmud3HEgrfpXxDoWpemlKqCqe0cUix0OtR8XfCukZ5lbnC7Mu/FtqBitVdQYd
+2r1nRK66fTJejblNd1E4TG0sIwucI5B24I7XeG78r2kECnFT+vBE8BA6c/y8nTjz
+grWVMeR3n7WFxaTL/VW/kapW2YddWPq1Jh4q4JkCgYB2QBk8kXBbkkxd1Sy3C9ES
+KlfmiGx8KCseDrFv5oUOG9O7mPEIdCVT7v5zmRZzD4ZFt6hS11bl4JFw/KQFLz/C
+Jf5CYDtTsVQxCfDtaJI0PkMfYyWUYYiOuztsOwFobeccOi931HPX510W7yddkKrd
+YNmg6k8bJyCjP4UBotjJWQKBgElP2KDQ0VpbHWZkhF/unEMi5GXLOTA9fukLsqQu
+wiD35nvsO3k4az5kgWalGhdb8Wl4eWzmgjUGPgR3bN+tYUA4b7OCci6xwEU2Tnpv
+OOeptxzOdUHdzVt8t2qjZQTNtMBh80FCIqswIgF5WpLqrO/W/f1y50Roe0bt2pu7
+KDERAoGAbhEL6OCfX6Pf5hCggb4ymm9zmAndRKHAAJ2WQ2p6v+r1vm838n6y8r7Q
+Fqc3B7NIDDYzX4oyQZepOnHNF/UPEyVyGvJ8LBDruyiuAdGakVEvSHZ+ml4LnS06
+msP5hsHh9s4ptVcaF3/mNllBys+FwEWvLewgfVPJrDBNINFvYZ8=
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-server.pem b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-server.pem
new file mode 100644
index 00000000000..caaee422a44
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/trusted-server.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDnTCCAoWgAwIBAgIDCWhIMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy
+dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE2MDUyM1oXDTM2MDMzMTE2MDUyM1ow
+gYAxIzAhBgNVBAMTGlRydXN0ZWQgS2VybmVsIFRlc3QgU2VydmVyMQ8wDQYDVQQL
+EwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENp
+dHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAJwOsMO+MhWmHcSOdjFrZYjPMOt8uIqJ1pR/JI/E
+hTdUq7fXPHZNhUTzwX1JZB+QkXhnJiAf2ZQOnl7R49vudoPdOZo6bJQP8/Hy5F79
+B8Nw9xrcPuzGBRK3IpY7j9gnAOC5jvN2zh+nHoqNhPmarpKgbDeLosABiDFSHiCE
+degHziJ0Tj0AJ6GRbeHeTvv5K4lLwMzyYnpkG0cMpLvLIUwJa22Vp8PujMcmjX9W
+ASmSXJmcszYKjaRc7HB6ronIEZWy//PSXlvuk8xYaM40HkGy2gN6wV+2Z45QdDds
+NxUuu56TzJ7z7as/vYXXsIc/TSmvM02S01JWUjWeVGc1sb8CAwEAAaMjMCEwHwYD
+VR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcNAQEFBQADggEB
+AAKLZiQxz3NYvc04GDMRIUDfR50RMw4SuXXgGYTpPUP/akOes11+u5iKhgyKQ+ca
+TttX8mwwvNjQFN8hjBodsrWK9avMUenJBk+Y2ztzLSpKAmC7NUUM6sFB1D3yocsG
+aH5EuyH/dcAdb9z5vYurriRfd1ldmyGAqvA6lKGp1zxTAi0WWbYIZia0LyVoH98p
+x0s+amrSMvkVqIK+qV+CVqW2dNLe+kREjGxzGidCSfHZrHncuTX8/10xHUbAQW0z
+EWF6epmm+jniwgh2Zs/xe7+eY1Nzfq0ly06MVKCs1/lZ0vhAHGZ7V6yBX5zig02x
+VAHb45KqzmYGwKErO7ZFY2I=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAnA6ww74yFaYdxI52MWtliM8w63y4ionWlH8kj8SFN1Srt9c8
+dk2FRPPBfUlkH5CReGcmIB/ZlA6eXtHj2+52g905mjpslA/z8fLkXv0Hw3D3Gtw+
+7MYFErciljuP2CcA4LmO83bOH6ceio2E+ZqukqBsN4uiwAGIMVIeIIR16AfOInRO
+PQAnoZFt4d5O+/kriUvAzPJiemQbRwyku8shTAlrbZWnw+6MxyaNf1YBKZJcmZyz
+NgqNpFzscHquicgRlbL/89JeW+6TzFhozjQeQbLaA3rBX7ZnjlB0N2w3FS67npPM
+nvPtqz+9hdewhz9NKa8zTZLTUlZSNZ5UZzWxvwIDAQABAoIBAQCQFHQY1NHy8OKM
+5aaz697bV8dns0fCCI7HnTdJUPxZYGAGJL8azmmbhp1+qbK5/cSA8GLfx+ge7PxE
+uO3x0RE0n5weC5DRhoUIPeOg22Y+iF5sOyoReqWWaOSS5bzhqOkDke4sU+TsjmQB
+MbWyqaBBmcEv60jAkumF97X++azOIm1EqTXfSu1K7gqtiL9H9T8vIYOOuTAduOsD
+el/v5QQbWb3e/NLhcmzHL6rPcR/9jCn1rJ9HAhAqm6eKZS2cAgTGLLtCUhumVliO
+bEIm2fcQ5h+BDZc5EF/SURKvUaFx/xTIQ5s1oEKN8iN+kIYzgbZ/Ds/GOo7nWVmy
+1KZswK05AoGBANBvT/vSpI7vokmph+GifjToHeinceg3pssf8mHw8xv3H0mZxBkt
+CJq6rFwKwMH8K9tQfBqp4hfVgfdAWZyKqqo1Mtecohzb9D0GLYZ6Of18pAZK/aEt
+L8ADuGYbLAFAS10z4djBSqlud82d194zSgfLP3FYRsj5Ni8w9bPuMOKVAoGBAL+r
+gd1/B+kkbO/NAprjXAFT91Wjf+YMQgM8vOMXlq7LlGeGQSb0zVZpdDtZ3ohqdu1i
+38y0G/CvBLddm8VkC3/fhfO8xW8PjdRBbF87j1k4HerAxcLOO91z+MHFMbUryOJc
+U0aAzJB3B4E491xaXTL8jZLYxmgJtc4jBcLKzmIDAoGBAKXf39w9Hx5tUE6k7vE+
+uodqLdsn3nt6Rm+iRedxtFcODEUrbKbIcu+IHYDGQe5eu5w2af1iMv7auCpHeMke
+hYEdAxAZo92poa4qy3IYtSuo1HP5m+x3pGd/znDbsOJyA0fx8QrpkHxT4F2u/srj
+MEgRlLSkFvj7cwaNRQvjQ94dAoGAA2+4wVbgtm5fwaDkVhCTeradrZxj06UOne49
+2Lh4jCO8QmrmyiMDd3QmkFXZJor6HOFz78Ce657Hr93uyAg2KJHCXg9ZXtdhjJer
+sL1poYjfCHFyWj7GVf8ZS6gUbxIc5OoQ2CfBAyoPKWLzFGXOW/apNyPJ0t2xs8Nu
+/AIU1y8CgYEAyUhyJe8XaDOjoR9D1freHY6Vt9NkofDpYfWL4+y/kGD45yCDBXj0
+LYAD89/Qog1MbPN8FGrgMu2b3dI0i+iZlWduvQRn71QepT6wiB84ivxjECSpZGoH
+2F0SM1MVAK/f4Dm9H9Kaukq2BpsN8Uhvzg2EUFg1mLJ+OBArgT524Ys=
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/wc_framework.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/wc_framework.js
new file mode 100644
index 00000000000..a707b21d7df
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/libs/wc_framework.js
@@ -0,0 +1,118 @@
+// runWCTest executes a tool against a number of configurations. A given replica set will have nodes prevented
+// from replicating, and the tool should either pass or fail based on the supplied write concern. As a final test,
+// the tool is run with w:3 and waits for all three nodes to come back online, simulating a slowly-replicated write.
+var runWCTest = function runWCTest(progName, rs, toolTest, testWriteConcern, testProgramNoConnect, testSetupFunction) {
+ jsTest.log("testing that "+progName+" deals with write concern");
+
+ function windowsEscape(json) {
+ if (_isWindows()) {
+ json = '"' + json.replace(/"/g, '\\"') + '"';
+ }
+ return json;
+ }
+
+ function stopSync(nodes) {
+ jsTest.log("stopping "+nodes.length+" nodes");
+ for (var i = 0; i < nodes.length; i++) {
+ nodes[i].runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ }
+ sleep(2000);
+ }
+
+ function startSync(nodes) {
+ jsTest.log("starting "+nodes.length+" nodes");
+ for (var i = 0; i < nodes.length; i++) {
+ nodes[i].runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ }
+ }
+
+ function loggedTestSetup() {
+ if (testSetupFunction) {
+ jsTest.log("running test setup");
+ testSetupFunction();
+ }
+ }
+
+ if (!testSetupFunction) {
+ testSetupFunction = function() {};
+ }
+
+ // grab the two secondary nodes
+ var masterPort = rs.getPrimary().port;
+ var members = [];
+ var stopped = [];
+ var ports = [];
+ for (var i = 0; i < rs.nodes.length; i++) {
+ if (rs.nodes[i].port !== masterPort) {
+ members.push(rs.nodes[i].getDB("admin"));
+ ports.push(rs.nodes[i].port);
+ }
+ }
+ var member1 = members[0];
+ var member2 = members[1];
+
+ loggedTestSetup();
+ testWriteConcern(0, [], progName+" without write concern to a fully functioning repl-set should succeed");
+
+ loggedTestSetup();
+ testWriteConcern(0, ['--writeConcern=majority'], progName+" with majority to a fully functioning repl-set should succeed");
+
+ loggedTestSetup();
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 to a fully functioning repl-set should succeed");
+
+ loggedTestSetup();
+ testWriteConcern(0, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 to a fully functioning repl-set should succeed");
+
+ jsTest.log("stopping node on port " + ports[0] + " from doing any further syncing");
+ stopped.push(member1);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 repl-set with 2 working nodes should succeed");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(0, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 repl-set with 2 working nodes should succeed");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(0, ['--writeConcern=majority'], progName+" with majority with two working nodes should succeed");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(1, ['--writeConcern={w:3,wtimeout:2000}'], progName+" with w:3,timeout:2000 repl-set with two working nodes should fail");
+ startSync(stopped);
+
+ jsTest.log("stopping second node on port " + ports[1] + " from doing any further syncing");
+ stopped.push(member2);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(1, [windowsEscape('--writeConcern={w:"majority",wtimeout:2000}')], progName+" with majority with one working node should fail");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(1, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 with one working node should fail");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 repl-set with one working node should succeed");
+ startSync(stopped);
+
+ loggedTestSetup();
+ stopSync(stopped);
+ jsTest.log(progName+" with w:3 concern, no working members, and no timeout waits until members are available");
+ var pid = testProgramNoConnect();
+ sleep(2000);
+ assert(checkProgram(pid), progName+" with w:3 and no working members should not have finished");
+ startSync(stopped);
+
+ jsTest.log("waiting for "+progName+" to finish");
+ var ret = waitProgram(pid);
+ assert.eq(0, ret, progName+" with w:3 should succeed once enough members start working");
+};
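+
+// Example (a hypothetical sketch of the expected call shape): each tool's
+// write concern suite supplies its own callbacks.
+//   runWCTest('mongorestore', rs, toolTest,
+//     function(expectedExit, wcArgs, msg) { /* run the tool, assert on the exit code */ },
+//     function() { /* start the tool without connecting; return its pid */ });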
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/15k_collections.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/15k_collections.js
new file mode 100644
index 00000000000..7bdbaceab60
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/15k_collections.js
@@ -0,0 +1,38 @@
+// this tests that we can restore a large number of collections, resolving
+// an issue raised by TOOLS-1088
+// @tags: [requires_many_files, requires_large_ram]
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('15k_collections');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+
+ for (var i=0; i<=15000; i++) {
+ var collName = "Coll" + i;
+ dbOne.createCollection(collName);
+ }
+
+ // dump it
+ var dumpTarget = '15k_collections_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/26_to_28.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/26_to_28.js
new file mode 100644
index 00000000000..d38808887ae
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/26_to_28.js
@@ -0,0 +1,67 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 3.0 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore to restore a dump from a 2.6 mongod to a 3.0 mongod.
+ jsTest.log('Testing running mongorestore restoring data from a 2.6 mongod to'+
+ ' a 3.0 mongod');
+
+ var toolTest = new ToolTest('26_to_28', {binVersion: '2.6'});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = '26_to_28_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some documents
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // restart the mongod as a 3.0
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ delete toolTest.options.binVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db and coll reference
+ testDB = toolTest.db.getSiblingDB('test');
+ testColl = testDB.coll;
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/28_to_26.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/28_to_26.js
new file mode 100644
index 00000000000..decc904465f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/28_to_26.js
@@ -0,0 +1,68 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 3.0 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore to restore a dump from a 2.8 mongod to a 2.6 mongod.
+
+ jsTest.log('Testing running mongorestore restoring data from a 2.8 mongod to'+
+ ' a 2.6 mongod');
+
+ var toolTest = new ToolTest('28_to_26');
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = '28_to_26_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some documents
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db and coll reference
+ testDB = toolTest.db.getSiblingDB('test');
+ testColl = testDB.coll;
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/archive_stdout.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/archive_stdout.js
new file mode 100644
index 00000000000..825d95916f0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/archive_stdout.js
@@ -0,0 +1,54 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = getToolTest('archive_stdout');
+ var baseArgs = getCommonToolArguments();
+ baseArgs = baseArgs.concat('--port', toolTest.port);
+
+ if (toolTest.useSSL) {
+ baseArgs = baseArgs.concat([
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslAllowInvalidHostnames']);
+ }
+ if (dump_targets === 'gzip') {
+ baseArgs = baseArgs.concat('--gzip');
+ }
+ var dumpArgs = ['mongodump', '--archive'].concat(baseArgs);
+ var restoreArgs = ['mongorestore', '--archive', '--drop'].concat(baseArgs);
+
+ dumpArgs[0] = 'PATH=.:$PATH ' + dumpArgs[0];
+ restoreArgs[0] = 'PATH=.:$PATH ' + restoreArgs[0];
+ if (_isWindows()) {
+ dumpArgs[0] += '.exe';
+ restoreArgs[0] += '.exe';
+ }
+
+ var testDb = toolTest.db;
+ testDb.dropDatabase();
+ var fooData = [];
+ var barData = [];
+ for (var i = 0; i < 500; i++) {
+ fooData.push({i: i});
+ barData.push({i: i*5});
+ }
+ testDb.foo.insertMany(fooData);
+ testDb.bar.insertMany(barData);
+ assert.eq(500, testDb.foo.count(), 'foo should have our test documents');
+ assert.eq(500, testDb.bar.count(), 'bar should have our test documents');
+
+ var ret = runProgram('bash', '-c', dumpArgs.concat('|', restoreArgs).join(' '));
+ assert.eq(0, ret, "bash execution should succeed");
+
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, testDb.foo.find({i: i}).count(), 'document #'+i+' not in foo');
+ assert.eq(1, testDb.bar.find({i: i*5}).count(), 'document #'+i+' not in bar');
+ }
+ assert.eq(500, testDb.foo.count(), 'foo should have our test documents');
+ assert.eq(500, testDb.bar.count(), 'bar should have our test documents');
+
+ testDb.dropDatabase();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/bad_options.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/bad_options.js
new file mode 100644
index 00000000000..07b6742637d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/bad_options.js
@@ -0,0 +1,54 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests running mongorestore with bad command line options.
+
+ jsTest.log('Testing running mongorestore with bad'+
+ ' command line options');
+
+ var toolTest = new ToolTest('incompatible_flags');
+ toolTest.startDB('foo');
+
+ // run restore with both --objcheck and --noobjcheck specified
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--objcheck', '--noobjcheck']
+ .concat(getRestoreTarget('restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // run restore with --oplogLimit with a bad timestamp
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay', '--oplogLimit',
+ 'xxx']
+ .concat(getRestoreTarget('restore/testdata/dump_with_oplog')));
+ assert.neq(0, ret);
+
+ // run restore with a negative --w value
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--w', '-1']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid db name
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'billy.crystal']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid collection name
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', '$money']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid verbosity value
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '-v', 'torvalds']
+ .concat(getRestoreTarget('restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_collection_bson.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_collection_bson.js
new file mode 100644
index 00000000000..43532f0d546
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_collection_bson.js
@@ -0,0 +1,43 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+ print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore to restore data from a blank collection
+ // file, with both a missing and blank metadata file.
+
+ jsTest.log('Testing restoration from a blank collection file');
+
+ var toolTest = getToolTest('blank_collection_bson');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with the blank collection file and no
+ // metadata file. it should succeed, but insert nothing.
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(0, toolTest.db.getSiblingDB('test').blank.count());
+
+ // run the restore with the blank collection file and a blank
+ // metadata file. it should succeed, but insert nothing.
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank_metadata.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(0, toolTest.db.getSiblingDB('test').blank.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_db.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_db.js
new file mode 100644
index 00000000000..1d3c85e3e0b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/blank_db.js
@@ -0,0 +1,29 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+ print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ // Tests using mongorestore to restore data from a blank db directory.
+
+ jsTest.log('Testing restoration from a blank db directory');
+
+ var toolTest = getToolTest('blank_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with the blank db directory. it should succeed, but
+ // insert nothing.
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/collation.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/collation.js
new file mode 100644
index 00000000000..ff7ebdbc997
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/collation.js
@@ -0,0 +1,73 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that mongorestore correctly restores collections with a default collation.
+
+ jsTest.log('Testing restoration of a collection with a default collation');
+
+ var toolTest = getToolTest('collation');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dumpTarget = 'collation_dump';
+ resetDbpath(dumpTarget);
+
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // Create a collection with a default collation.
+ assert.commandWorked(testDB.createCollection('coll', {collation: {locale: 'fr_CA'}}));
+ var collectionInfos = testDB.getCollectionInfos({name: 'coll'});
+ assert.eq(collectionInfos.length, 1);
+ assert(collectionInfos[0].options.hasOwnProperty('collation'), tojson(collectionInfos[0]));
+ var collationBefore = collectionInfos[0].options.collation;
+
+ // Dump the data.
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Drop the collection.
+ testColl.drop();
+
+ // Restore the data.
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ collectionInfos = testDB.getCollectionInfos({name: 'coll'});
+ assert.eq(collectionInfos.length, 1);
+ assert(collectionInfos[0].options.hasOwnProperty('collation'), tojson(collectionInfos[0]));
+ var collationAfter = collectionInfos[0].options.collation;
+
+ // Check that the collection was restored with the same collation.
+ assert.docEq(collationBefore, collationAfter, tojson(collationBefore) + tojson(collationAfter));
+
+ if (dump_targets === 'archive') {
+ jsTest.log('skipping bson file restore test while running with archiving');
+ } else {
+ // Drop the collection.
+ testColl.drop();
+
+ // Restore the data, but this time mentioning the bson file specifically.
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget+'/test/coll.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ collectionInfos = testDB.getCollectionInfos({name: 'coll'});
+ assert.eq(collectionInfos.length, 1);
+ assert(collectionInfos[0].options.hasOwnProperty('collation'), tojson(collectionInfos[0]));
+ collationAfter = collectionInfos[0].options.collation;
+
+ // Check that the collection was restored with the same collation.
+ assert.docEq(collationBefore, collationAfter, tojson(collationBefore) + tojson(collationAfter));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js
new file mode 100644
index 00000000000..381e59a5216
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js
@@ -0,0 +1,138 @@
+// This test requires mongo 2.6.x releases
+// @tags: [requires_mongo_26]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests using mongorestore to restore a dump containing users. If there is a
+ // conflicting authSchemaVersion in the admin.system.version document, it
+ // should be ignored, and the restore should complete successfully.
+
+ jsTest.log('Testing restoring a dump with a potentially conflicting'+
+ ' authSchemaVersion in the database');
+
+ if (dump_targets !== "standard") {
+ print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion, shouldSucceed) {
+
+ jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') +
+ ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' +
+ (restoreVersion || 'latest') + ', and destDBVersion=' +
+ (destDBVersion || 'latest') + ', expected to pass=' + shouldSucceed);
+
+ var toolTest = new ToolTest('conflicting_auth_schema_version',
+ {binVersion: sourceDBVersion, auth: ''});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'conflicting_auth_schema_version_dump';
+ resetDbpath(dumpTarget);
+
+ // the admin db, and the non-admin db we'll be using
+ var adminDB = toolTest.db.getSiblingDB('admin');
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create a user admin
+ adminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdminAnyDatabase', db: 'admin'},
+ {role: 'readWriteAnyDatabase', db: 'admin'},
+ {role: 'backup', db: 'admin'},
+ ],
+ });
+ var authInfo = {user: 'admin', pwd: 'password'};
+ if (sourceDBVersion === "2.6") {
+ authInfo.mechanism = "MONGODB-CR";
+ }
+ assert.eq(1, adminDB.auth(authInfo));
+
+ // add some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+
+ // sanity check the data was inserted
+ assert.eq(10, testDB.data.count());
+
+ // dump all the data
+ var args = ['mongodump' + (dumpVersion ? ('-'+dumpVersion) : ''),
+ '--username', 'admin',
+ '--password', 'password', '--port', toolTest.port]
+ .concat(getDumpTarget(dumpTarget));
+ if (sourceDBVersion === "2.6") {
+ args.push("--authenticationMechanism=MONGODB-CR");
+ }
+ var ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // restart the mongod, with a clean db path
+ stopMongod(toolTest.port);
+ resetDbpath(toolTest.dbpath);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options.binVersion = destDBVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db references
+ adminDB = toolTest.db.getSiblingDB('admin');
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // create a new user admin
+ adminDB.createUser({
+ user: 'admin28',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdminAnyDatabase', db: 'admin'},
+ {role: 'readWriteAnyDatabase', db: 'admin'},
+ {role: 'restore', db: 'admin'},
+ ],
+ });
+
+ var authInfoDest = {user: 'admin28', pwd: 'password'};
+ if (destDBVersion === "2.6") {
+ authInfoDest.mechanism = "MONGODB-CR";
+ }
+ assert.eq(1, adminDB.auth(authInfoDest));
+
+ // do a full restore
+ args = ['mongorestore' + (restoreVersion ? ('-'+restoreVersion) : ''),
+ '--username', 'admin28',
+ '--password', 'password',
+ '--port', toolTest.port,
+ '--stopOnError']
+ .concat(getRestoreTarget(dumpTarget));
+
+ ret = runMongoProgram.apply(this, args);
+
+ if (shouldSucceed) {
+ assert.eq(0, ret);
+ // make sure the data and users are all there
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+ var users = adminDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'admin' || users[1].user === 'admin');
+ assert(users[0].user === 'admin28' || users[1].user === 'admin28');
+ } else {
+ assert.neq(0, ret);
+ }
+ // success
+ toolTest.stop();
+ };
+
+ // 'undefined' triggers latest
+ runTest('2.6', '2.6', undefined, '2.6', true);
+ runTest('2.6', '2.6', undefined, undefined, true);
+ runTest('2.6', undefined, undefined, undefined, true);
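+  // users dumped from a newer (SCRAM-era) server cannot be restored into a
+  // 2.6 destination, so that combination is expected to fail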
+ runTest(undefined, undefined, undefined, '2.6', false);
+ runTest(undefined, undefined, undefined, undefined, true);
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_collection.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_collection.js
new file mode 100644
index 00000000000..0d061934cd8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_collection.js
@@ -0,0 +1,93 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  // Tests using mongorestore to restore data to a different collection
+  // than the one it was dumped from.
+
+ jsTest.log('Testing restoration to a different collection');
+
+ if (dump_targets === 'archive') {
+ jsTest.log('Skipping test unsupported against archive targets');
+ return assert(true);
+ }
+
+ var toolTest = getToolTest('different_collection');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'different_collection_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+ // the collection we will dump from
+ var sourceCollName = 'sourceColl';
+
+ // insert a bunch of data
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i});
+ }
+ sourceDB[sourceCollName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[sourceCollName].count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // restore just the collection into a different collection
+ // in the same database
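+  // (pointing the restore at a single .bson file with --db/--collection
+  // restores only that collection, under the name --collection specifies)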
+ var destCollName = 'destColl';
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'source',
+ '--collection', destCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, sourceDB[destCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, sourceDB[destCollName].count({_id: i}));
+ }
+
+ // restore just the collection into a similarly-named collection
+ // in a different database
+ var destDB = toolTest.db.getSiblingDB('dest');
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dest',
+ '--collection', sourceCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, destDB[sourceCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, destDB[sourceCollName].count({_id: i}));
+ }
+
+ // restore just the collection into a different collection
+ // in a different database
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dest',
+ '--collection', destCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, destDB[destCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, destDB[destCollName].count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_db.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_db.js
new file mode 100644
index 00000000000..237eb86b2b4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/different_db.js
@@ -0,0 +1,89 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore data to a different db than
+ // it was dumped from.
+
+ jsTest.log('Testing restoration to a different db');
+
+ if (dump_targets === 'archive') {
+ jsTest.log('Skipping test unsupported against archive targets');
+ return assert(true);
+ }
+
+ var toolTest = getToolTest('different_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'different_db_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+ // the db we will restore to
+ var destDB = toolTest.db.getSiblingDB('dest');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // we'll use two collections
+ var collNames = ['coll1', 'coll2'];
+
+ // insert a bunch of data
+ collNames.forEach(function(collName) {
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ sourceDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[collName].count());
+ });
+
+ // dump the data
+ ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // restore the data to a different db
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dest']
+ .concat(getRestoreTarget(dumpTarget+'/source'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ collNames.forEach(function(collName) {
+ assert.eq(500, destDB[collName].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, destDB[collName].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // restore the data to another different db
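+  // $db$ and $collection$ are named wildcards: each dumped namespace is
+  // matched against the --nsFrom pattern and rewritten via --nsTo, so
+  // source.coll1 ends up as otherdest.source_coll1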
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--nsFrom', '$db$.$collection$',
+ '--nsTo', 'otherdest.$db$_$collection$']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ destDB = toolTest.db.getSiblingDB('otherdest');
+ collNames.forEach(function(collName) {
+ assert.eq(500, destDB['source_'+collName].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, destDB['source_'+collName].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_authenticated_user.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_authenticated_user.js
new file mode 100644
index 00000000000..25d286524c6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_authenticated_user.js
@@ -0,0 +1,111 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests running mongorestore with --drop and --restoreDbUsersAndRoles,
+ // in addition to --auth, and makes sure the authenticated user does not
+ // get dropped before it can complete the restore job.
+
+ jsTest.log('Testing dropping the authenticated user with mongorestore');
+
+ var toolTest = new ToolTest('drop_authenticated_user', {auth: ''});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_authenticated_user_dump';
+ resetDbpath(dumpTarget);
+
+ // we'll use the admin db so that the user we are restoring as
+ // is part of the db we are restoring
+ var adminDB = toolTest.db.getSiblingDB('admin');
+
+ // create the users we'll need for the dump
+ adminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdmin', db: 'admin'},
+ {role: 'readWrite', db: 'admin'},
+ ],
+ });
+ adminDB.auth('admin', 'password');
+
+ adminDB.createUser({
+ user: 'backup',
+ pwd: 'password',
+ roles: [{role: 'backup', db: 'admin'}],
+ });
+
+ // create a role
+ adminDB.createRole({
+ role: 'extraRole',
+ privileges: [{
+ resource: {db: 'admin', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ adminDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, adminDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--username', 'backup',
+ '--password', 'password']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop all the data, but not the users or roles
+ adminDB.data.remove({});
+ // sanity check the removal worked
+ assert.eq(0, adminDB.data.count());
+
+ // now create the restore user, so that we can use it for the restore but it is
+ // not part of the dump
+ adminDB.createUser({
+ user: 'restore',
+ pwd: 'password',
+ roles: [{role: 'restore', db: 'admin'}],
+ });
+
+ // insert some data to be removed when --drop is run
+ data = [];
+ for (i = 10; i < 20; i++) {
+ data.push({_id: i});
+ }
+ adminDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, adminDB.data.count());
+
+ // restore the data, specifying --drop
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--drop',
+ '--username', 'restore',
+ '--password', 'password']
+ .concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the existing data was removed, and replaced with the dumped data
+ assert.eq(10, adminDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, adminDB.data.count({_id: i}));
+ }
+
+ // make sure the correct roles and users exist - that the restore user was dropped
+ var users = adminDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'backup' || users[1].user === 'backup');
+ assert(users[0].user === 'admin' || users[1].user === 'admin');
+ assert.eq(1, adminDB.getRoles().length);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_nonexistent_db.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_nonexistent_db.js
new file mode 100644
index 00000000000..36cb99338f1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_nonexistent_db.js
@@ -0,0 +1,58 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop on a database with
+ // nothing to drop does not error out, and completes the
+ // restore successfully.
+
+ jsTest.log('Testing restoration with --drop on a nonexistent db');
+
+ var toolTest = getToolTest('drop_nonexistent_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_nonexistent_db_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // insert a bunch of data
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i});
+ }
+ testDB.coll.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, testDB.coll.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database we are using
+ testDB.dropDatabase();
+ // sanity check the drop worked
+ assert.eq(0, testDB.coll.count());
+
+ // restore the data with --drop
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(500, testDB.coll.count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, testDB.coll.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_one_collection.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_one_collection.js
new file mode 100644
index 00000000000..9a6c856ee32
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_one_collection.js
@@ -0,0 +1,92 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop and --collection leaves data
+ // in other collections untouched (that --drop only applies to the
+ // specified collection).
+
+ jsTest.log('Testing restoration with --drop and --collection, with data in'+
+ ' other collections');
+
+ var toolTest = getToolTest('drop_one_collection');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_one_collection_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will take the dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+
+ // dump from two different collections, even though we'll
+ // only be restoring one.
+ var collNames = ['coll1', 'coll2'];
+ collNames.forEach(function(collName) {
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ sourceDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[collName].count());
+ });
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop and replace the data
+ collNames.forEach(function(collName) {
+ sourceDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, sourceDB[collName].count());
+
+ // insert a disjoint set of data from the dump
+ var data = [];
+ for (var i = 500; i < 600; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ sourceDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(100, sourceDB[collName].count());
+ });
+
+ // insert data into the same collections in a different db
+ var otherDB = toolTest.db.getSiblingDB('other');
+ collNames.forEach(function(collName) {
+ var data = [];
+ for (var i = 500; i < 600; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ otherDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(100, otherDB[collName].count());
+ });
+
+ // restore with --drop and --collection
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop',
+ '--db', 'source',
+ '--collection', 'coll1']
+ .concat(getRestoreTarget(dumpTarget+'/source/coll1.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the dumped data replaced the old data in only
+ // the specified collection, and all other data was left untouched
+ assert.eq(500, sourceDB.coll1.count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, sourceDB.coll1.count({_id: i+'_coll1'}));
+ }
+ assert.eq(100, sourceDB.coll2.count());
+ assert.eq(100, otherDB.coll1.count());
+ assert.eq(100, otherDB.coll2.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_with_data.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_with_data.js
new file mode 100644
index 00000000000..56fc1a93473
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/drop_with_data.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop drops existing data
+ // before restoring.
+
+ jsTest.log('Testing restoration with --drop on existing data');
+
+ var toolTest = getToolTest('drop_with_data');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_with_data_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use two collections, to make sure they both
+ // get dropped appropriately
+ var collNames = ['coll1', 'coll2'];
+
+ // insert a bunch of data to be dumped
+ collNames.forEach(function(collName) {
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ testDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, testDB[collName].count());
+ });
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop all the data, and replace it with different data
+ collNames.forEach(function(collName) {
+ testDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, testDB[collName].count());
+
+ var data = [];
+ for (var i = 500; i < 600; i++) {
+ data.push({_id: i+'_'+collName});
+ }
+ testDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(100, testDB[collName].count());
+ });
+
+ // restore with --drop. the current data in all collections should
+ // be removed and replaced with the dumped data
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the dumped data was restored, and the old data
+ // was dropped
+ collNames.forEach(function(collName) {
+ assert.eq(500, testDB[collName].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, testDB[collName].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/duplicate_keys.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/duplicate_keys.js
new file mode 100644
index 00000000000..acc261dba11
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/duplicate_keys.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore a mix of existing and
+ // non-existing documents to a collection, so we can make sure
+ // all new documents are actually added.
+
+ jsTest.log('Testing restoration of a dump on top of existing documents');
+
+ var toolTest = getToolTest('dupe_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'dupe_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // we'll insert data into three collections spread across two dbs
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var testColl = dbOne.duplicates;
+
+ // insert a bunch of data
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // remove a few random documents
+ var removeDocs = function() {
+ testColl.remove({_id: 0});
+ testColl.remove({_id: 5});
+ testColl.remove({_id: 6});
+ testColl.remove({_id: 9});
+ testColl.remove({_id: 12});
+ testColl.remove({_id: 27});
+ testColl.remove({_id: 40});
+ testColl.remove({_id: 46});
+ testColl.remove({_id: 47});
+ testColl.remove({_id: 49});
+ assert.eq(40, testColl.count());
+ };
+ removeDocs();
+
+ // restore the db with default settings
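+  // duplicate key errors on the documents that still exist are non-fatal
+  // by default, so only the removed documents should be inserted back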
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and all of the removed keys were restored
+ assert.eq(50, testColl.count(), "some documents were not restored with default settings");
+
+ // now check an array of batch sizes
+ for (i = 1; i < 100; i++) {
+ removeDocs();
+ ret = toolTest.runTool.apply(toolTest, ['restore', "--batchSize", String(i)]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(50, testColl.count(), "some documents were not restored for batchSize="+i);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/empty_users_and_roles.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/empty_users_and_roles.js
new file mode 100644
index 00000000000..090dd746043
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/empty_users_and_roles.js
@@ -0,0 +1,33 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+    print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles, with
+ // no users or roles in the dump.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles with'+
+ ' no users or roles in the dump');
+
+ var toolTest = getToolTest('empty_users_and_roles');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with no users or roles. it should succeed, but create no
+ // users or roles
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/extended_json_metadata.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/extended_json_metadata.js
new file mode 100644
index 00000000000..59d9997262e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/extended_json_metadata.js
@@ -0,0 +1,42 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests that using mongorestore on a collection with extended json types
+ // in the metadata (both indexes and options) is handled gracefully.
+
+ jsTest.log('Testing that restoration of extended JSON collection options works.');
+
+ var toolTest = getToolTest('extended_json_metadata_restore');
+ var commonToolArgs = getCommonToolArguments();
+ var testDB = toolTest.db.getSiblingDB('test');
+ assert.eq(testDB.changelog.exists(), null, "collection already exists in db");
+
+ // run a restore against the mongos
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_extended_json_options'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "the restore does not crash");
+
+ var collectionOptionsFromDB = testDB.changelog.exists();
+ printjson(collectionOptionsFromDB);
+ assert.eq(collectionOptionsFromDB.options.capped, true, "capped option should be restored");
+  // MongoDB may fudge the capped collection size for different storage
+  // engines, so we allow some wiggle room.
+ var delta = 1000;
+ var size = 10 * 1000 * 1000;
+ assert.lte(collectionOptionsFromDB.options.size, size+delta, "size should be ~10000000");
+ assert.gte(collectionOptionsFromDB.options.size, size-delta, "size should be ~10000000");
+
+ var indexes = testDB.changelog.getIndexes();
+ printjson(indexes);
+ assert.eq(indexes[0].key._id, 1, "index is read properly");
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/index_version_roundtrip.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/index_version_roundtrip.js
new file mode 100644
index 00000000000..8eca087f5dd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/index_version_roundtrip.js
@@ -0,0 +1,109 @@
+// @tags: [requires_mongo_34]
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that mongorestore correctly round-trips _id index versions.
+
+ jsTest.log('Testing restoration of different types of indexes');
+
+ var toolTest = getToolTest('index_version_roundtrip');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var name = 'idx_version_rt_dump';
+ resetDbpath(name);
+
+ var testDB = toolTest.db.getSiblingDB(name);
+
+ // drop the db
+ testDB.dropDatabase();
+
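+  // the 'idIndex' create option (added in 3.4, hence the tag above) lets
+  // the test pin each collection's _id index to a specific index version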
+ assert.commandWorked(testDB.runCommand({
+ create: "coll1",
+ idIndex: {
+ v: 1,
+ key: {
+ _id: 1
+ },
+ name: "_id_",
+ ns: name + ".coll1",
+ }
+ }));
+ assert.commandWorked(testDB.runCommand({
+ create: "coll2",
+ idIndex: {
+ v: 2,
+ key: {
+ _id: 1
+ },
+ name: "_id_",
+ ns: name + ".coll2",
+ }
+ }));
+
+  // create an additional index to verify non-_id indexes work
+ assert.commandWorked(testDB.coll1.ensureIndex({a: 1}, {v: 1}));
+ assert.commandWorked(testDB.coll2.ensureIndex({a: 1}, {v: 2}));
+
+ // insert arbitrary data so the collections aren't empty
+ testDB.coll1.insert({a: 123});
+ testDB.coll2.insert({a: 123});
+
+ // store the index specs, for comparison after dump / restore
+ var idxSorter = function(a, b) {
+ return a.name.localeCompare(b.name);
+ };
+
+ var idxPre1 = testDB.coll1.getIndexSpecs();
+ idxPre1.sort(idxSorter);
+ var idxPre2 = testDB.coll2.getIndexSpecs();
+ idxPre2.sort(idxSorter);
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(name))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the db
+ testDB.dropDatabase();
+ // sanity check that the drop worked
+  assert.eq(0, testDB.runCommand({
+    listCollections: 1
+  }).cursor.firstBatch.length);
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--keepIndexVersion']
+ .concat(getRestoreTarget(name))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(1, testDB.coll1.find().itcount());
+ assert.eq(1, testDB.coll2.find().itcount());
+
+ // make sure the indexes were restored correctly
+ var idxPost1 = testDB.coll1.getIndexSpecs();
+ idxPost1.sort(idxSorter);
+ assert.eq(idxPre1.length, idxPost1.length,
+ "indexes before: " + tojson(idxPre1) + "\nindexes after: " + tojson(idxPost1));
+ for (var i = 0; i < idxPre1.length; i++) {
+ assert.eq(idxPre1[i], idxPost1[i]);
+ }
+
+ var idxPost2 = testDB.coll2.getIndexSpecs();
+ idxPost2.sort(idxSorter);
+ assert.eq(idxPre2.length, idxPost2.length,
+ "indexes before: " + tojson(idxPre2) + "\nindexes after: " + tojson(idxPost2));
+ for (i = 0; i < idxPre2.length; i++) {
+ assert.eq(idxPre2[i], idxPost2[i]);
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/indexes.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/indexes.js
new file mode 100644
index 00000000000..0f5cdc201db
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/indexes.js
@@ -0,0 +1,98 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that mongorestore handles restoring different types of
+ // indexes correctly.
+
+ jsTest.log('Testing restoration of different types of indexes');
+
+ var toolTest = getToolTest('indexes');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'indexes_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // create a bunch of indexes of different types
+ testColl.ensureIndex({a: 1});
+ testColl.ensureIndex({b: 1}, {sparse: true, unique: true});
+ testColl.ensureIndex({a: 1, b: -1});
+ testColl.ensureIndex({b: NumberLong("1"), a: NumberLong("1")});
+ testColl.ensureIndex({listField: 1});
+ testColl.ensureIndex({textField: 'text'}, {language: 'spanish'});
+ testColl.ensureIndex({geoField: '2dsphere'});
+
+ // store the getIndexes() output, to compare with the output
+ // after dumping and restoring
+ var indexesPre = testColl.getIndexes();
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 5; i++) {
+ data.push({a: i, b: i+1, listField: [i, i+1]});
+ data.push({textField: 'hola '+i});
+ data.push({geoField: {type: 'Point', coordinates: [i, i+1]}});
+ }
+ testColl.insertMany(data);
+ // sanity check the data was inserted
+ assert.eq(15, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the collection
+ testColl.drop();
+ // sanity check that the drop worked
+ assert.eq(0, testColl.count());
+ assert.eq(0, testColl.getIndexes().length);
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(15, testColl.count());
+
+ // make sure the indexes were restored correctly
+ var indexesPost = testColl.getIndexes();
+ assert.eq(indexesPre.length, indexesPost.length);
+
+ if (dump_targets === "archive") {
+ jsTest.log('skipping bson file restore test while running with archiving');
+ } else {
+ // drop the collection again
+ testColl.drop();
+ // sanity check that the drop worked
+ assert.eq(0, testColl.count());
+
+ assert.eq(0, testColl.getIndexes().length);
+
+ // restore the data, but this time mentioning the bson file specifically
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget+"/test/coll.bson"))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(15, testColl.count());
+
+ // make sure the indexes were restored correctly
+ indexesPost = testColl.getIndexes();
+ assert.eq(indexesPre.length, indexesPost.length);
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_dump_target.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_dump_target.js
new file mode 100644
index 00000000000..89ecaca7ddc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_dump_target.js
@@ -0,0 +1,32 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests running mongorestore with invalid specified dumps (directories when
+  // files are expected, and vice versa).
+
+  jsTest.log('Testing running mongorestore with invalid dump targets');
+
+ var toolTest = new ToolTest('invalid_dump_target');
+ toolTest.startDB('foo');
+
+ // run restore with a file, not a directory, specified as the dump location
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README')));
+ assert.neq(0, ret);
+
+ // run restore with --db specified and a file, not a directory, as the db dump
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README')));
+ assert.neq(0, ret);
+
+ // run restore with --collection specified and a directory, not a file,
+ // as the dump file
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_metadata.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_metadata.js
new file mode 100644
index 00000000000..72bcec079ae
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/invalid_metadata.js
@@ -0,0 +1,22 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests using mongorestore to restore data from a collection whose .metadata.json
+ // file contains invalid indexes.
+
+ jsTest.log('Testing restoration from a metadata file with invalid indexes');
+
+ var toolTest = new ToolTest('invalid_metadata');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a collection whose metadata file contains an invalid index
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'invalid_metadata']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/keep_index_version.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/keep_index_version.js
new file mode 100644
index 00000000000..498d3fc7fdb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/keep_index_version.js
@@ -0,0 +1,92 @@
+(function() {
+
+ load('jstests/common/check_version.js');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --keepIndexVersion does not
+ // update the index version, and that running it without
+ // --keepIndexVersion does.
+
+ jsTest.log('Testing mongorestore with --keepIndexVersion');
+
+ var toolTest = getToolTest('keep_index_version');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'keep_index_version_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ if (isAtLeastVersion(testDB.version(), '3.1.0')) {
+ jsTest.log("skipping test on "+testDB.version());
+ return;
+ }
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ jsTest.log("skipping test on "+testDB.version()+" when storage engine is wiredTiger");
+ return;
+ }
+
+ // create a version 0 index on the collection
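+  // (v0 is the pre-2.0 on-disk index format; newer servers and the
+  // wiredTiger engine reject it, hence the version and engine gates above)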
+ testColl.ensureIndex({num: 1}, {v: 0});
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({num: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insert worked
+ assert.eq(10, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(10, testColl.count());
+
+ // make sure the index version was updated
+ var indexes = testColl.getIndexes();
+ assert.eq(2, indexes.length);
+ assert.eq(1, indexes[1].v);
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data with --keepIndexVersion specified
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--keepIndexVersion']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(10, testColl.count());
+
+ // make sure the index version was not updated
+ indexes = testColl.getIndexes();
+ assert.eq(2, indexes.length);
+ assert.eq(0, indexes[1].v);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/large_bulk.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/large_bulk.js
new file mode 100644
index 00000000000..2ffb80bddbe
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/large_bulk.js
@@ -0,0 +1,56 @@
+(function() {
+
+  // this test verifies that the bulk api doesn't create BSON documents greater
+  // than the 16MB limit, as was discovered in TOOLS-939.
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('large_bulk');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ // create a test collection
+
+ var oneK="";
+ var oneM="";
+ var i;
+ for (i=0; i<=1024; i++) {
+ oneK+="X";
+ }
+ for (i=0; i<=1024; i++) {
+ oneM+=oneK;
+ }
+
+ var data = [];
+ for (i=0; i<=32; i++) {
+ data.push({data: oneM});
+ }
+ dbOne.test.insertMany(data);
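+  // 33 documents of ~1MB each: more than fits in a single 16MB bulk
+  // message, so the restore must split its batches by physical size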
+
+ // dump it
+ var dumpTarget = 'large_bulk_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it
+  // the 33 records are well under the 1k batch size,
+  // so this should test whether the physical size limit is respected
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_bson.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_bson.js
new file mode 100644
index 00000000000..3782cf631b9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_bson.js
@@ -0,0 +1,20 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore to restore data from a malformed bson file.
+
+ jsTest.log('Testing restoration from a malformed bson file');
+
+ var toolTest = new ToolTest('malformed_bson');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a malformed bson file
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'malformed_coll']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_metadata.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_metadata.js
new file mode 100644
index 00000000000..c1d8faab161
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/malformed_metadata.js
@@ -0,0 +1,22 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore to restore data from a collection with
+ // a malformed metadata file.
+
+ jsTest.log('Testing restoration from a malformed metadata file');
+
+ var toolTest = new ToolTest('malformed_metadata');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a collection with a malformed
+ // metadata.json file.
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'malformed_metadata']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/missing_dump.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/missing_dump.js
new file mode 100644
index 00000000000..4f96008cd35
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/missing_dump.js
@@ -0,0 +1,32 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests running mongorestore with a missing dump files and directories.
+
+ jsTest.log('Testing running mongorestore with missing dump files and directories');
+
+ var toolTest = new ToolTest('missing_dump');
+ toolTest.startDB('foo');
+
+ // run restore with a missing dump directory
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('xxxxxxxx')));
+ assert.neq(0, ret);
+
+ // run restore with --db and a missing dump directory
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test']
+ .concat(getRestoreTarget('xxxxxxxx')));
+ assert.neq(0, ret);
+
+ // specify --collection with a missing file
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/xxxxxxxx.bson')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/multiple_dbs.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/multiple_dbs.js
new file mode 100644
index 00000000000..cfd051c35a0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/multiple_dbs.js
@@ -0,0 +1,82 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore data to multiple dbs.
+
+ jsTest.log('Testing restoration to multiple dbs');
+
+ var toolTest = getToolTest('multiple_dbs');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'multiple_dbs_dump';
+ resetDbpath(dumpTarget);
+
+ // the dbs we will be using
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+
+ // we'll use two collections in each db, with one of
+ // the collection names common across the dbs
+ var oneOnlyCollName = 'dbOneColl';
+ var twoOnlyCollName = 'dbTwoColl';
+ var sharedCollName = 'bothColl';
+
+ // insert a bunch of data
+ var data = {};
+ data[oneOnlyCollName] = [];
+ data[twoOnlyCollName] = [];
+ data[sharedCollName+'one'] = [];
+ data[sharedCollName+'two'] = [];
+ for (var i = 0; i < 50; i++) {
+ data[oneOnlyCollName].push({_id: i+'_'+oneOnlyCollName});
+ data[twoOnlyCollName].push({_id: i+'_'+twoOnlyCollName});
+ data[sharedCollName+'one'].push({_id: i+'_dbOne_'+sharedCollName});
+ data[sharedCollName+'two'].push({_id: i+'_dbTwo_'+sharedCollName});
+ }
+ dbOne[oneOnlyCollName].insertMany(data[oneOnlyCollName]);
+ dbTwo[twoOnlyCollName].insertMany(data[twoOnlyCollName]);
+ dbOne[sharedCollName].insertMany(data[sharedCollName+'one']);
+ dbTwo[sharedCollName].insertMany(data[sharedCollName+'two']);
+
+ // sanity check the insertion worked
+ assert.eq(50, dbOne[oneOnlyCollName].count());
+ assert.eq(50, dbTwo[twoOnlyCollName].count());
+ assert.eq(50, dbOne[sharedCollName].count());
+ assert.eq(50, dbTwo[sharedCollName].count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the databases
+ dbOne.dropDatabase();
+ dbTwo.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored properly
+ assert.eq(50, dbOne[oneOnlyCollName].count());
+ assert.eq(50, dbTwo[twoOnlyCollName].count());
+ assert.eq(50, dbOne[sharedCollName].count());
+ assert.eq(50, dbTwo[sharedCollName].count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, dbOne[oneOnlyCollName].count({_id: i+'_'+oneOnlyCollName}));
+ assert.eq(1, dbTwo[twoOnlyCollName].count({_id: i+'_'+twoOnlyCollName}));
+ assert.eq(1, dbOne[sharedCollName].count({_id: i+'_dbOne_'+sharedCollName}));
+ assert.eq(1, dbTwo[sharedCollName].count({_id: i+'_dbTwo_'+sharedCollName}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/namespaces.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/namespaces.js
new file mode 100644
index 00000000000..cdad2a667d2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/namespaces.js
@@ -0,0 +1,154 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  jsTest.log('Testing namespace excludes, includes, and mappings during restore');
+
+ var toolTest = getToolTest('namespaces');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'namespaces_dump';
+
+ // the db we will dump from
+ var source1DB = toolTest.db.getSiblingDB('source1');
+ var source2DB = toolTest.db.getSiblingDB('source2');
+ var source3DB = toolTest.db.getSiblingDB('source3');
+ // the db we will restore to
+ var destDB = toolTest.db.getSiblingDB('dest');
+
+ function performRestoreWithArgs(...args) {
+ return toolTest.runTool.apply(toolTest, ['restore']
+ .concat(args)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ function addTestDataTo(db, colls) {
+ colls.forEach(function(coll) {
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i+'_'+db.getName()+'.'+coll});
+ }
+ db[coll].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, db[coll].count());
+ // Add an index
+ var index = {};
+ index[db.getName()+'.'+coll] = 1;
+ db[coll].createIndex(index);
+ });
+ }
+
+ function verifyDataIn(collection, sourceNS) {
+ if (sourceNS === null) {
+ assert.eq(0, collection.count());
+ return;
+ }
+ assert.eq(500, collection.count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, collection.count({_id: i+'_'+sourceNS}));
+ }
+ assert.eq(1, collection.getIndexes()[1].key[sourceNS]);
+ }
+
+ addTestDataTo(source1DB, ['coll1', 'coll2', 'coll3']);
+ verifyDataIn(source1DB.coll1, 'source1.coll1');
+ verifyDataIn(source1DB.coll2, 'source1.coll2');
+ verifyDataIn(source1DB.coll3, 'source1.coll3');
+
+ addTestDataTo(source2DB, ['coll1', 'coll2', 'coll3']);
+ verifyDataIn(source2DB.coll1, 'source2.coll1');
+ verifyDataIn(source2DB.coll2, 'source2.coll2');
+ verifyDataIn(source2DB.coll3, 'source2.coll3');
+
+ addTestDataTo(source3DB, ['coll3', 'coll4']);
+ verifyDataIn(source3DB.coll3, 'source3.coll3');
+ verifyDataIn(source3DB.coll4, 'source3.coll4');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Get rid of the source databases
+ source1DB.dropDatabase();
+ source2DB.dropDatabase();
+ source3DB.dropDatabase();
+
+ // Exclude *.coll1
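+  // '*' matches any database name, while $db-num$ and $coll-num$ are named
+  // wildcards whose captured text is substituted into the --nsTo pattern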
+ ret = performRestoreWithArgs('--nsExclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
+
+ verifyDataIn(destDB.coll_1_1, null);
+ verifyDataIn(destDB.coll_1_2, 'source1.coll2');
+ verifyDataIn(destDB.coll_1_3, 'source1.coll3');
+ verifyDataIn(destDB.coll_2_1, null);
+ verifyDataIn(destDB.coll_2_2, 'source2.coll2');
+ verifyDataIn(destDB.coll_2_3, 'source2.coll3');
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, 'source3.coll3');
+ verifyDataIn(destDB.coll_3_4, 'source3.coll4');
+
+ destDB.dropDatabase();
+
+  // Include only *.coll1
+ ret = performRestoreWithArgs('--nsInclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
+
+ verifyDataIn(destDB.coll_1_1, 'source1.coll1');
+ verifyDataIn(destDB.coll_1_2, null);
+ verifyDataIn(destDB.coll_1_3, null);
+ verifyDataIn(destDB.coll_2_1, 'source2.coll1');
+ verifyDataIn(destDB.coll_2_2, null);
+ verifyDataIn(destDB.coll_2_3, null);
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, null);
+ verifyDataIn(destDB.coll_3_4, null);
+
+ destDB.dropDatabase();
+
+ // Exclude collections beginning with 'coll' (which is all of them)
+ ret = performRestoreWithArgs('--excludeCollectionsWithPrefix', 'coll', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
+
+ verifyDataIn(destDB.coll_1_1, null);
+ verifyDataIn(destDB.coll_1_2, null);
+ verifyDataIn(destDB.coll_1_3, null);
+ verifyDataIn(destDB.coll_2_1, null);
+ verifyDataIn(destDB.coll_2_2, null);
+ verifyDataIn(destDB.coll_2_3, null);
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, null);
+ verifyDataIn(destDB.coll_3_4, null);
+
+ destDB.dropDatabase();
+
+ // Swap source1 and source2 databases
+ ret = performRestoreWithArgs('--nsFrom', 'source1.*', '--nsTo', 'source2.*', '--nsFrom', 'source2.*', '--nsTo', 'source1.*');
+ assert.eq(0, ret);
+
+ verifyDataIn(source1DB.coll1, 'source2.coll1');
+ verifyDataIn(source1DB.coll2, 'source2.coll2');
+ verifyDataIn(source1DB.coll3, 'source2.coll3');
+ verifyDataIn(source2DB.coll1, 'source1.coll1');
+ verifyDataIn(source2DB.coll2, 'source1.coll2');
+ verifyDataIn(source2DB.coll3, 'source1.coll3');
+ verifyDataIn(source3DB.coll3, 'source3.coll3');
+ verifyDataIn(source3DB.coll4, 'source3.coll4');
+
+ source1DB.dropDatabase();
+ source2DB.dropDatabase();
+ source3DB.dropDatabase();
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_index_restore.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_index_restore.js
new file mode 100644
index 00000000000..43fa829de21
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_index_restore.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --noIndexRestore does not
+ // restore indexes.
+
+ jsTest.log('Testing restoration with --noIndexRestore');
+
+ var toolTest = getToolTest('no_index_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'no_index_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use two collections, one with no indexes, the other
+ // with indexes
+ var collNames = ['coll1', 'coll2'];
+
+ // insert some data to be dumped
+ collNames.forEach(function(collName) {
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i, num: i+1, s: ''+i});
+ }
+ testDB[collName].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, testDB[collName].count());
+ });
+
+ // create some indexes for the second collection
+ testDB.coll2.ensureIndex({num: 1});
+ testDB.coll2.ensureIndex({num: 1, s: -1});
+ // sanity check the indexes were created
+ assert.eq(3, testDB.coll2.getIndexes().length);
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the collections
+ collNames.forEach(function(collName) {
+ testDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, testDB[collName].count());
+ assert.eq(0, testDB[collName].getIndexes().length);
+ });
+
+ // restore the data, with --noIndexRestore
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--noIndexRestore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored fully, and only the _id
+ // indexes were restored
+ collNames.forEach(function(collName) {
+ assert.eq(10, testDB[collName].count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, testDB[collName].count({_id: i}));
+ }
+
+ assert.eq(1, testDB[collName].getIndexes().length);
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_options_restore.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_options_restore.js
new file mode 100644
index 00000000000..89dd29551dd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/no_options_restore.js
@@ -0,0 +1,131 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  // As of 2.8, the listCollections command is the way to get full
+  // collection options, so we use this helper to pull the options
+  // from a listCollections cursor.
+ var extractCollectionOptions = function(db, name) {
+ var res = db.runCommand("listCollections");
+ for (var i = 0; i < res.cursor.firstBatch.length; i++) {
+ if (res.cursor.firstBatch[i].name === name) {
+ return res.cursor.firstBatch[i].options;
+ }
+ }
+ return {};
+ };
+
+ // Tests that running mongorestore with --noOptionsRestore does
+ // not restore collection options, and that running it without
+ // --noOptionsRestore does restore collection options.
+ jsTest.log('Testing restoration with --noOptionsRestore');
+
+ var toolTest = getToolTest('no_options_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'no_options_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use three different collections - the first will have
+ // options set, the second won't, the third will be capped.
+ // TODO: why aren't these being used?
+ // var collWithOptions = testDB.withOptions;
+ // var collWithoutOptions = testDB.withoutOptions;
+ // var collCapped = testDB.capped;
+
+ // create the noPadding collection
+ var noPaddingOptions = {noPadding: true};
+ testDB.createCollection('withOptions', noPaddingOptions);
+
+ // create the capped collection
+ var cappedOptions = {capped: true, size: 4096, autoIndexId: true};
+ testDB.createCollection('capped', cappedOptions);
+
+ // insert some data into all three collections
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testDB[collName].insertMany(data);
+ // sanity check the insertions worked
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // add options to the appropriate collection
+  var cmdRet = testDB.runCommand({'collMod': 'withOptions', usePowerOf2Sizes: true});
+ assert.eq(1, cmdRet.ok);
+
+ // store the default options, because they change based on storage engine
+ var baseCappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ var baseWithOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ var baseWithoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // make sure the options were restored correctly
+ var cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ // Restore no longer honors autoIndexId.
+ if (!cappedOptionsFromDB.hasOwnProperty('autoIndexId')) {
+ cappedOptionsFromDB.autoIndexId = true;
+ }
+ assert.eq(baseCappedOptionsFromDB, cappedOptionsFromDB);
+ var withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ assert.eq(baseWithOptionsFromDB, withOptionsFromDB);
+ var withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+ assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // restore the data, without the options
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--noOptionsRestore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // make sure the options were not restored
+ cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ assert.eq(baseWithoutOptionsFromDB, cappedOptionsFromDB);
+ withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ assert.eq(baseWithoutOptionsFromDB, withOptionsFromDB);
+ withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+ assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);
+
+ // additional check that the capped collection is no longer capped
+ var cappedStats = testDB.capped.stats();
+ assert(!cappedStats.capped);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/norestore_profile.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/norestore_profile.js
new file mode 100644
index 00000000000..84ea4a5b5dc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/norestore_profile.js
@@ -0,0 +1,58 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('norestore_profile');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ // turn on the profiler
+ dbOne.setProfilingLevel(2);
+
+ // create some test data
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ dbOne.test.insertMany(data);
+ // run some queries to end up in the profile collection
+ dbOne.test.find({_id: 3});
+ dbOne.test.find({_id: 30});
+ dbOne.test.find({_id: 50});
+
+ assert.gt(dbOne.system.profile.count(), 0, "profiler still empty after running test setup");
+
+ // dump it
+ var dumpTarget = 'norestore_profile';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // turn off profiling and remove the profiler collection
+ dbOne.setProfilingLevel(0);
+ dbOne.system.profile.drop();
+ assert.eq(dbOne.system.profile.count(), 0);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it, this should restore everything *except* the profile collection
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // check that the data actually got restored
+ assert.gt(dbOne.test.count(), 100);
+
+ // but the profile collection should still be empty
+ assert.eq(dbOne.system.profile.count(), 0);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/objcheck_valid_bson.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/objcheck_valid_bson.js
new file mode 100644
index 00000000000..baa2d67545d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/objcheck_valid_bson.js
@@ -0,0 +1,46 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests that running mongorestore with --objcheck on valid bson
+ // files restores the data successfully.
+
+ jsTest.log('Testing restoration with --objcheck');
+
+ var toolTest = new ToolTest('objcheck_valid_bson');
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'objcheck_valid_bson_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 50; i++) {
+ data.push({_id: i});
+ }
+ testColl.insertMany(data);
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // restore the data, with --objcheck
+  ret = toolTest.runTool.apply(toolTest, ['restore', '--objcheck'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+  // make sure the restore completed successfully
+ assert.eq(50, testColl.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js
new file mode 100644
index 00000000000..4148da1fffd
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js
@@ -0,0 +1,78 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore with the --oplogReplay and --oplogLimit flags.
+
+ jsTest.log('Testing restoration with the --oplogReplay and --oplogLimit options');
+
+ var toolTest = getToolTest('oplog_replay_and_limit');
+ var commonToolArgs = getCommonToolArguments();
+
+ // this test uses the testdata/dump_with_oplog directory. this directory contains:
+ // - a test/ subdirectory, which will restore objects { _id: i } for i from
+ // 0-9 to the test.data collection
+ // - an oplog.bson file, which contains oplog entries for inserts of
+ // objects { _id: i } for i from 10-14 to the test.data collection.
+ //
+ // within the oplog.bson file, the entries for i from 10-13 have timestamps
+  // 1416342265:2 through 1416342265:5. the entry for { _id: 14 } has
+  // timestamp 1500000000:1.
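+  //
+  // for illustration, the insert entry for { _id: 10 } looks roughly like
+  // this (a sketch, omitting fields such as 'h' and 'v'):
+  //   { ts: Timestamp(1416342265, 2), op: 'i', ns: 'test.data', o: { _id: 10 } }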
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // restore the data, without --oplogReplay. _ids 0-9, which appear in the
+ // collection's bson file, should be restored.
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(10, testColl.count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data, with --oplogReplay. _ids 10-14, appearing
+ // in the oplog.bson file, should be inserted as well.
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(15, testColl.count());
+ for (i = 0; i < 15; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // drop the db
+ testDB.dropDatabase();
+
+  // restore the data with --oplogReplay, using an --oplogLimit value that
+  // will filter out { _id: 14 }.
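+  // --oplogLimit takes <seconds>[:ordinal]; only oplog entries with a
+  // timestamp strictly before that value are replayed, so 1416342266:0
+  // keeps the 1416342265:* entries but drops the one at 1500000000:1.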
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogLimit', '1416342266:0']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(14, testColl.count());
+ for (i = 0; i < 14; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_conflict.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_conflict.js
new file mode 100644
index 00000000000..b7399df669d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_conflict.js
@@ -0,0 +1,33 @@
+/**
+ * oplog_replay_conflict.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user provides two top-priority
+ * oplogs, in which case mongorestore should exit with an error.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var restoreTarget = 'jstests/restore/testdata/dump_oplog_conflict';
+
+ var toolTest = getToolTest('oplog_replay_conflict');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test');
+ testDB.createCollection('data');
+ var testColl = testDB.data;
+
+ // Replay the oplog from the provided oplog
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
+ restoreTarget].concat(commonToolArgs));
+
+ assert.eq(0, testColl.count(),
+ "no original entries should be restored");
+ assert.eq(1, ret, "restore operation succeeded when it shouldn't have");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js
new file mode 100644
index 00000000000..49d994348ee
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js
@@ -0,0 +1,67 @@
+/**
+ * oplog_replay_local_rs.js
+ *
+ * This file tests mongorestore with --oplogReplay where the oplog file is in the 'oplog.rs'
+ * collection of the 'local' database. This occurs when using a replica-set for replication.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_local_rs';
+
+ var toolTest = getToolTest('oplog_replay_local_rs');
+
+ // Set the test db to 'local' and collection to 'oplog.rs' to fake a replica set oplog
+ var testDB = toolTest.db.getSiblingDB('local');
+ var testColl = testDB['oplog.rs'];
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var oplogSize = 100;
+ testDB.createCollection('oplog.rs', {capped: true, size: 100000});
+
+ // Create a fake oplog consisting of 100 inserts.
+ for (var i = 0; i < oplogSize; i++) {
+ var r = testColl.insert({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: 'a' + i},
+ ns: "test.op",
+ });
+ assert.eq(1, r.nInserted, "insert failed");
+ }
+
+ // Dump the fake oplog.
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'local',
+ '-c', 'oplog.rs',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed");
+
+  // Drop the original data.
+  testColl.drop();
+  assert.eq(0, testColl.count(), "all original entries should be dropped");
+
+ // Create the test.op collection.
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+ // Replay the oplog from the provided oplog
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js
new file mode 100644
index 00000000000..eae7db1519e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js
@@ -0,0 +1,19 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore with --oplogReplay when no oplog.bson file is present.
+
+ jsTest.log('Testing restoration with --oplogReplay and no oplog.bson file');
+
+ var toolTest = new ToolTest('oplog_replay_no_oplog');
+ toolTest.startDB('foo');
+
+ // run the restore, with a dump directory that has no oplog.bson file
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
+    .concat(getRestoreTarget('jstests/restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_noop.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_noop.js
new file mode 100644
index 00000000000..6a1f20d5cf6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_noop.js
@@ -0,0 +1,37 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore with --oplogReplay and noops in the oplog.bson,
+ // making sure the noops are ignored.
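+  //
+  // for reference, a noop entry uses op 'n' and looks roughly like this
+  // (a sketch; exact fields vary by server version):
+  //   { ts: Timestamp(...), op: 'n', ns: '', o: { msg: 'noop' } }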
+
+ jsTest.log('Testing restoration with --oplogReplay and noops');
+
+ var toolTest = getToolTest('oplog_replay_noop');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // restore the data, with --oplogReplay
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_noop_in_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the document appearing in the oplog, which shows up
+ // after the noops, was added successfully
+ assert.eq(1, testColl.count());
+ assert.eq(1, testColl.count({a: 1}));
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js
new file mode 100644
index 00000000000..eb2eeb1138f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js
@@ -0,0 +1,40 @@
+/**
+ * oplog_replay_priority_oplog.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user provides two oplogs and
+ * mongorestore only restores the higher priority one.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var restoreTarget = 'jstests/restore/testdata/dump_local_oplog';
+
+ var toolTest = getToolTest('oplog_replay_priority_oplog');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test');
+ testDB.createCollection('data');
+ var testColl = testDB.data;
+ testDB.createCollection('op');
+ var restoreColl = testDB.op;
+
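+  // The dump directory also contains a dumped oplog; the --oplogFile passed
+  // below is the higher-priority one, so only its entries should be replayed.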
+ // Replay the oplog from the provided oplog
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
+ restoreTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ // Extra oplog has 5 entries as explained in oplog_replay_and_limit.js
+ assert.eq(5, testColl.count(),
+ "all original entries from high priority oplog should be restored");
+ assert.eq(0, restoreColl.count(),
+ "no original entries from low priority oplog should be restored");
+ toolTest.stop();
+}());
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js
new file mode 100644
index 00000000000..775da7fbc55
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js
@@ -0,0 +1,71 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_sizes';
+
+ // Helper for using mongorestore with --oplogReplay and a large oplog.bson
+ function tryOplogReplay(oplogSize, documentSize) {
+ var toolTest = getToolTest('oplog_replay_sizes');
+ // the test db and collections we'll be using
+ var testDB = toolTest.db.getSiblingDB('test_oplog');
+ var testColl = testDB.oplog;
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var debugString = 'with ' + oplogSize + ' ops of size ' + documentSize;
+ jsTest.log('Testing --oplogReplay ' + debugString);
+
+
+ // create a fake oplog consisting of a large number of inserts
+    var xStr = new Array(documentSize).join("x"); // (documentSize - 1) 'x' chars, ~documentSize bytes
+ var data = [];
+ for (var i = 0; i < oplogSize; i++) {
+ data.push({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: xStr},
+ ns: "test.op"
+ });
+ if (data.length === 1000) {
+ testColl.insertMany(data);
+ data = [];
+ }
+ }
+ testColl.insertMany(data);
+
+ // dump the fake oplog
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'test_oplog',
+ '-c', 'oplog',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed " + debugString);
+
+ // create the test.op collection
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+ // trick restore into replaying the "oplog" we forged above
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay', dumpTarget+'/test_oplog']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed " + debugString);
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted " + debugString);
+ toolTest.stop();
+ }
+
+  // run the test on various oplog and op sizes
+  tryOplogReplay(1024, 1024); // sanity check
+  tryOplogReplay(1024*1024, 1); // ~a million micro ops
+  tryOplogReplay(8, 16*1024*1023); // 8 ops just under the 16MB BSON limit
+  tryOplogReplay(32, 1024*1024); // 32 ~1MB ops
+  tryOplogReplay(32*1024, 1024); // many ~1KB ops
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js
new file mode 100644
index 00000000000..ce3fff41f9b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js
@@ -0,0 +1,70 @@
+/**
+ * oplog_replay_specify_file.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user specifies a file with the
+ * --oplogFile flag.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_specify_file';
+
+ var toolTest = getToolTest('oplog_replay_specify_file');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test_oplog');
+ var testColl = testDB.foo;
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var oplogSize = 100;
+
+ // Create a fake oplog consisting of 100 inserts.
+ var data = [];
+ for (var i = 0; i < oplogSize; i++) {
+ data.push({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: 'a' + i},
+ ns: "test.op"
+ });
+ }
+ testColl.insertMany(data, {ordered: true});
+
+ // Dump the fake oplog.
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'test_oplog',
+ '-c', 'foo',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed");
+
+  // Drop the original data.
+ testColl.drop();
+ assert.eq(0, testColl.count(),
+ "all original entries should be dropped");
+
+ // Create the test.op collection.
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+ // Replay the oplog from the provided oplog
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', dumpTarget + '/test_oplog/foo.bson',
+ dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted");
+ assert.eq(oplogSize, testColl.count(),
+ "all original entries should be restored");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/ordered_partial_index.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/ordered_partial_index.js
new file mode 100644
index 00000000000..2babdcafff5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/ordered_partial_index.js
@@ -0,0 +1,45 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+  // Tests that using mongorestore on a collection with a partial index
+  // preserves the field order of the index's partialFilterExpression.
+
+  jsTest.log('Testing that restoration preserves partial index filter expression order.');
+
+ var toolTest = getToolTest('ordered_partial_index');
+ var commonToolArgs = getCommonToolArguments();
+ var testDB = toolTest.db.getSiblingDB('test');
+ assert.eq(testDB.foo.exists(), null, "collection already exists in db");
+
+ // run a restore against the mongos
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_ordered_partial_index'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "the restore does not crash");
+
+  var apfe;
+  var indexes = testDB.foo.getIndexes();
+  for (var i = 0; i < indexes.length; i++) {
+    if (indexes[i].name === "apfe") {
+      apfe = indexes[i].partialFilterExpression;
+    }
+  }
+
+ i = 0;
+ for (var fe in apfe) {
+ if (apfe.hasOwnProperty(fe)) {
+ assert.eq(fe, "a"+i, "partial indexes are in the correct order");
+ i++;
+ } else {
+ doassert("No property " + fe);
+ }
+ }
+
+  // success
+  toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/partial_restore.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/partial_restore.js
new file mode 100644
index 00000000000..fdfa2bc9a28
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/partial_restore.js
@@ -0,0 +1,83 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore only a subset of a dump (either a
+ // single db or a single collection) from a larger dump.
+
+ jsTest.log('Testing restoration of a subset of a dump');
+
+ var toolTest = getToolTest('partial_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'partial_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // we'll insert data into three collections spread across two dbs
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+ var collOne = dbOne.collOne;
+ var collTwo = dbOne.collTwo;
+ var collThree = dbTwo.collThree;
+
+  // insert a bunch of data
+  var dataOne = [];
+  var dataTwo = [];
+  var dataThree = [];
+  for (var i = 0; i < 50; i++) {
+    dataOne.push({_id: i+'_collOne'});
+    dataTwo.push({_id: i+'_collTwo'});
+    dataThree.push({_id: i+'_collThree'});
+  }
+  collOne.insertMany(dataOne);
+  collTwo.insertMany(dataTwo);
+  collThree.insertMany(dataThree);
+ // sanity check the insertion worked
+ assert.eq(50, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(50, collThree.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the databases
+ dbOne.dropDatabase();
+ dbTwo.dropDatabase();
+
+ // restore a single db
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dbOne']
+ .concat(getRestoreTarget(dumpTarget+'/dbOne'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and nothing else but that db was restored
+ assert.eq(50, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(0, collThree.count());
+
+ // drop the data
+ dbOne.dropDatabase();
+
+ // restore a single collection
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'collTwo']
+ .concat(getRestoreTarget(dumpTarget+'/dbOne/collTwo.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and nothing else but that collection was restored
+ assert.eq(0, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(0, collThree.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js
new file mode 100644
index 00000000000..8bd8a67d6ae
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js
@@ -0,0 +1,28 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ jsTest.log('Testing that the order of fields is preserved in the oplog');
+
+ var toolTest = new ToolTest('ordered_oplog');
+ toolTest.startDB('foo');
+
+ var testDb = toolTest.db.getSiblingDB('test');
+ testDb.createCollection("foobar");
+
+  // run restore with an "update" oplog whose _id field is a subdocument with several fields:
+  // { "h":{"$numberLong":"7987029173745013482"},"ns":"test.foobar",
+  //   "o":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8},"foo":"bar"},
+  //   "o2":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8}},"op":"u","ts":{"$timestamp":{"t":1439225650,"i":1}},"v":NumberInt(2)
+  // }
+  // if the _id from the "o" and the _id from the "o2" don't match, then mongod complains.
+  // run the restore several times: a single execution could pass by chance if
+  // the fields happened to come out in the correct order, but repeated runs
+  // make a false positive very unlikely.
+ for (var i=0; i<10; i++) {
+ var ret = toolTest.runTool('restore', '--oplogReplay', 'jstests/restore/testdata/dump_with_complex_id_oplog');
+ assert.eq(0, ret);
+ }
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/restore_document_validation.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/restore_document_validation.js
new file mode 100644
index 00000000000..fa72d670fdc
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/restore_document_validation.js
@@ -0,0 +1,180 @@
+/**
+ * restore_document_validation.js
+ *
+ * This file tests that mongorestore works with document validation. It checks both that
+ * invalid documents are not restored when validation is turned on, and that all documents
+ * are restored when the user indicates they want to bypass validation.
+ */
+
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ /**
+ * Part 1: Test that restore follows document validation rules.
+ */
+ jsTest.log('Testing that restore reacts well to document validation');
+
+ var toolTest = getToolTest('document_validation');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'doc_validation';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create 1000 documents, half of which will pass the validation
+ var data = [];
+ for (var i = 0; i < 1000; i++) {
+ if (i%2 === 0) {
+ data.push({_id: i, num: i+1, s: String(i)});
+ } else {
+ data.push({_id: i, num: i+1, s: String(i), baz: i});
+ }
+ }
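+  // only the odd-numbered documents carry a 'baz' field, so exactly 500 of
+  // the 1000 satisfy the {baz: {$exists: true}} validator created later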
+ testDB.bar.insertMany(data, {ordered: true});
+ // sanity check the insertion worked
+ assert.eq(1000, testDB.bar.count(), 'all documents should be inserted');
+
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '-v']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'dumping should run successfully');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // sanity check that we can restore the data without validation
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ assert.eq(1000, testDB.bar.count(), 'after the restore, all documents should be seen');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // turn on validation
+ var r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
+
+ // test that it's working
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, "invalid documents shouldn't be inserted");
+
+ // restore the 1000 records of which only 500 are valid
+ ret = toolTest.runTool.apply(toolTest, ['restore', '-v']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring against a collection with validation on should still succeed');
+
+ assert.eq(500, testDB.bar.count(), 'only the valid documents should be restored');
+
+ /**
+ * Part 2: Test that restore can bypass document validation rules.
+ */
+ jsTest.log('Testing that bypass document validation works');
+
+ testDB.dropDatabase();
+
+ // turn on validation
+ r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
+
+ // test that we cannot insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, 'invalid documents should not be inserted');
+
+ // restore the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be restored with bypass document validation set');
+
+ /**
+ * Part 3: Test that restore can restore the document validation rules,
+ * if they're dumped with the collection.
+ */
+ jsTest.log('Testing that dump and restore restores the validation rules themselves');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after validation rules are dropped');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(1000, testDB.bar.count());
+
+  // turn on validation on an existing collection
+ testDB.runCommand({'collMod': 'bar', 'validator': {baz: {$exists: true}}});
+
+ // re-dump everything, this time dumping the validation rules themselves
+ ret = toolTest.runTool.apply(toolTest, ['dump', '-v']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'the dump should run successfully');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after we drop validation rules');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring rules and some invalid documents should run successfully');
+ assert.eq(500, testDB.bar.count(),
+ 'restoring the validation rules and documents should only restore valid documents');
+
+ /**
+ * Part 4: Test that restore can bypass the document validation rules,
+ * even if they're dumped with the collection and restored with the collection.
+ */
+ jsTest.log('Testing that bypass document validation works when restoring the rules as well');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after validation rules are dropped');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be restored with bypass document validation set');
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/sharded_fullrestore.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/sharded_fullrestore.js
new file mode 100644
index 00000000000..7096943b0c3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/sharded_fullrestore.js
@@ -0,0 +1,45 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/sharding_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+    print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ var targetPath = 'restore_full_restore';
+ var toolTest = getToolTest('fullrestore');
+ var commonToolArgs = getCommonToolArguments();
+
+ var sourceDB = toolTest.db.getSiblingDB('blahblah');
+
+ // put in some sample data
+ var data = [];
+ for (var i=0; i<100; i++) {
+ data.push({x: 1});
+ }
+ sourceDB.test.insertMany(data);
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "dump of full sharded system should have succeeded");
+
+  // a full restore should fail, because the dump includes the config database
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.neq(ret, 0, "restore of full sharded system should have failed");
+
+ // delete the config dir
+ resetDbpath(targetPath + "/config");
+
+ // *now* the restore should succeed
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "restore of sharded system without config db should have succeeded");
+
+  // success
+  toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/stop_on_error.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/stop_on_error.js
new file mode 100644
index 00000000000..15f68466e51
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/stop_on_error.js
@@ -0,0 +1,50 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('stop_on_error');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ // create a test collection
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ dbOne.test.insertMany(data);
+
+ // dump it
+ var dumpTarget = 'stop_on_error_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it - database was just dropped, so this should work successfully
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // restore it again with --stopOnError - this one should fail since there are dup keys
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--stopOnError', '-vvvv']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.neq(0, ret);
+
+ // restore it one more time without --stopOnError - there are dup keys but they will be ignored
+ ret = toolTest.runTool.apply(toolTest, ['restore', '-vvvv']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/symlinks.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/symlinks.js
new file mode 100644
index 00000000000..a27ef8b94c3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/symlinks.js
@@ -0,0 +1,46 @@
+(function() {
+
+ // Tests using mongorestore on a dump directory containing symlinks
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ jsTest.log('Testing restoration from a dump containing symlinks');
+
+ var toolTest = getToolTest('symlinks');
+
+  // this test uses the testdata/dump_with_soft_links directory. within that
+  // directory, the dbTwo directory is a soft link to testdata/soft_linked_db,
+  // and the dbOne/data.bson file is a soft link to
+  // testdata/soft_linked_collection.bson. the file not_a_dir is a soft link
+  // to a bson file, and is there to make sure that soft-linked regular files
+  // are not treated as directories.
+
+ // the two dbs we'll be using
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+ var notADir = toolTest.db.getSiblingDB('not_a_dir');
+
+ // restore the data
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_soft_links')));
+ assert.eq(0, ret);
+
+ // make sure the data was restored properly
+ assert.eq(10, dbOne.data.count());
+ assert.eq(10, dbTwo.data.count());
+ assert.eq(0, notADir.data.count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, dbOne.data.count({_id: i+'_dbOne'}));
+ assert.eq(1, dbTwo.data.count({_id: i+'_dbTwo'}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/blankdb/README b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/blankdb/README
new file mode 100644
index 00000000000..8a13ce0a00c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/blankdb/README
@@ -0,0 +1 @@
+This exists so that this directory can remain blank of .bson files but still be checked into version control.
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir
new file mode 100644
index 00000000000..a9ada58715f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir
Binary files differ
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles.js
new file mode 100644
index 00000000000..eae515292b7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles.js
@@ -0,0 +1,87 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles');
+
+ var toolTest = getToolTest('users_and_roles');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+  // restore the data, specifying --restoreDbUsersAndRoles
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_26_to_28.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_26_to_28.js
new file mode 100644
index 00000000000..705685acfd2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_26_to_28.js
@@ -0,0 +1,103 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // skip tests requiring wiredTiger storage engine on pre 2.8 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from
+ // a 2.6 mongod and restoring to a 2.8+ mongod
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+ ' restoring a 2.6 dump to a 2.8 mongod');
+
+ var toolTest = new ToolTest('users_and_roles_26_to_28', {binVersion: '2.6'});
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_26_to_28_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '-vv', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as latest
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ delete toolTest.options.binVersion;
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles
+ ret = toolTest.runTool.apply(toolTest, ['restore', '-vv', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js
new file mode 100644
index 00000000000..1158ed5842f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js
@@ -0,0 +1,159 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 2.8 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from
+ // a 2.8 mongod and restoring to a 2.6 mongod, which should fail.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+ ' restoring a 2.8 dump to a 2.6 mongod');
+
+ var toolTest = new ToolTest('users_and_roles_28_to_26');
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_28_to_26_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles. it should fail
+ // since the auth version is too new
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+ ' restoring a 2.8 dump to a 2.6 mongod');
+
+ toolTest = new ToolTest('users_and_roles_28_to_26');
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ dumpTarget = 'users_and_roles_28_to_26_dump';
+
+ // the db we'll be using
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles. it should fail
+ // since the auth version is too new
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js
new file mode 100644
index 00000000000..a00dcc62f88
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js
@@ -0,0 +1,144 @@
+// This test requires mongo 2.6.x releases
+// @tags: [requires_mongo_26]
+(function() {
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles against
+ // a full dump.
+
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles against'+
+ ' a full dump');
+
+ if (typeof getDumpTarget === 'undefined') {
+ load('jstests/configs/standard_dump_targets.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion) {
+
+ jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') +
+ ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' +
+ (restoreVersion || 'latest') + ', and destDBVersion=' +
+ (destDBVersion || 'latest'));
+
+ var toolTest = new ToolTest('users_and_roles_full_dump',
+ {binVersion: sourceDBVersion});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_full_dump_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using, and the admin db
+ var adminDB = toolTest.db.getSiblingDB('admin');
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create a user and role on the admin database
+ adminDB.createUser({
+ user: 'adminUser',
+ pwd: 'password',
+ roles: [{role: 'read', db: 'admin'}],
+ });
+ adminDB.createRole({
+ role: 'adminRole',
+ privileges: [{
+ resource: {db: 'admin', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ var data = [];
+ for (var i = 0; i < 10; i++) {
+ data.push({_id: i});
+ }
+ testDB.data.insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var args = ['mongodump' + (dumpVersion ? ('-'+dumpVersion) : ''),
+ '--port', toolTest.port]
+ .concat(getDumpTarget(dumpTarget));
+ var ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // restart the mongod, with a clean db path
+ stopMongod(toolTest.port);
+ resetDbpath(toolTest.dbpath);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options.binVersion = destDBVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db references
+ adminDB = toolTest.db.getSiblingDB('admin');
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // do a full restore
+ args = ['mongorestore' + (restoreVersion ? ('-'+restoreVersion) : ''),
+ '--port', toolTest.port]
+ .concat(getRestoreTarget(dumpTarget));
+ ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+ var adminUsers = adminDB.getUsers();
+ assert.eq(1, adminUsers.length);
+ assert.eq('adminUser', adminUsers[0].user);
+
+ // make sure the roles were restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+ var adminRoles = adminDB.getRoles();
+ assert.eq(1, adminRoles.length);
+ assert.eq('adminRole', adminRoles[0].role);
+
+ // success
+ toolTest.stop();
+
+ };
+
+ // 'undefined' triggers latest
+ runTest('2.6', '2.6', undefined, '2.6');
+ runTest('2.6', '2.6', undefined, undefined);
+ runTest('2.6', undefined, undefined, undefined);
+ runTest('2.6', undefined, undefined, '2.6');
+ runTest(undefined, undefined, undefined, undefined);
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern.js
new file mode 100644
index 00000000000..ab95be4abb2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern.js
@@ -0,0 +1,69 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
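+  // chainingAllowed:false forces every secondary to sync directly from the
+  // primary, which helps make the w:3 write concern behavior deterministic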
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+ var dbOne = rs.getPrimary().getDB("dbOne");
+
+ // create a test collection
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ dbOne.test.insertMany(data);
+ rs.awaitReplication();
+
+  // dump the data that we'll restore
+ var dumpTarget = 'write_concern_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(writeConcern)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongorestore',
+ '--writeConcern={w:3}', '--host', rs.getPrimary().host]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest, testSetup);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern_mongos.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern_mongos.js
new file mode 100644
index 00000000000..7d71080fa7c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/restore/write_concern_mongos.js
@@ -0,0 +1,74 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = new ToolTest('write_concern', null);
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
+ var commonToolArgs = getCommonToolArguments();
+ var dbOne = st.s.getDB("dbOne");
+
+ // create a test collection
+ var data = [];
+ for (var i=0; i<=100; i++) {
+ data.push({_id: i, x: i*i});
+ }
+ dbOne.test.insertMany(data);
+ rs.awaitReplication();
+
+  // dump the data that we'll restore
+ var dumpTarget = 'write_concern_mongos_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '-d', 'dbOne']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(writeConcern)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ }
+
+ function testSetup() {
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongorestore',
+ '--writeConcern={w:3}', '--host', st.s.host]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest, testSetup);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/ssl/ssl_with_system_ca.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/ssl/ssl_with_system_ca.js
new file mode 100644
index 00000000000..4f3126e3ddf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/ssl/ssl_with_system_ca.js
@@ -0,0 +1,43 @@
+// On OSX this test assumes that jstests/libs/trusted-ca.pem has been added as a trusted
+// certificate to the login keychain of the evergreen user. See,
+// https://github.com/10gen/buildslave-cookbooks/commit/af7cabe5b6e0885902ebd4902f7f974b64cc8961
+// for details.
+((function() {
+ 'use strict';
+ const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
+
+ if (HOST_TYPE === "windows") {
+ runProgram(
+ "certutil.exe", "-addstore", "-user", "-f", "CA", "jstests\\libs\\trusted-ca.pem");
+ }
+
+  var testWithCerts = function(serverPem) {
+    jsTest.log(`Testing with SSL certs ${serverPem}`);
+    // requireSSL so that the tool must connect over SSL.
+ var conn = MongoRunner.runMongod(
+ {sslMode: 'requireSSL', sslPEMKeyFile: "jstests/libs/" + serverPem});
+
+    // Run mongodump over SSL; it should succeed only when the server's
+    // certificate chains to a CA the system trusts.
+    var argv = ['./mongodump', '-v', '--ssl', '--port', conn.port];
+ if (HOST_TYPE === "linux") {
+ // On Linux we override the default path to the system CA store to point to our
+ // "trusted" CA. On Windows, this CA will have been added to the user's trusted CA list
+ argv.unshift("env", "SSL_CERT_FILE=jstests/libs/trusted-ca.pem");
+ }
+ var exitStatus = runMongoProgram.apply(null, argv);
+    assert.eq(exitStatus, 0, "should connect successfully with SSL");
+
+ MongoRunner.stopMongod(conn);
+ };
+
+  assert.throws(function() {
+    testWithCerts("server.pem");
+  });
+  assert.doesNotThrow(function() {
+    testWithCerts("trusted-server.pem");
+  });
+})());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_auth.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_auth.js
new file mode 100644
index 00000000000..46d1600519d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_auth.js
@@ -0,0 +1,30 @@
+(function() {
+ load("jstests/libs/mongostat.js");
+ var port = allocatePort();
+ var m = startMongod(
+ "--auth",
+ "--port", port,
+ "--dbpath", MongoRunner.dataPath+"stat_auth"+port,
+ "--bind_ip", "127.0.0.1");
+
+ var db = m.getDB("admin");
+ db.createUser({
+ user: "foobar",
+ pwd: "foobar",
+ roles: jsTest.adminUserRoles
+ });
+
+ assert(db.auth("foobar", "foobar"), "auth failed");
+
+ var args = ["mongostat",
+ "--host", "127.0.0.1:" + port,
+ "--rowcount", "1",
+ "--authenticationDatabase", "admin",
+ "--username", "foobar"];
+
+ var x = runMongoProgram.apply(null, args.concat("--password", "foobar"));
+ assert.eq(x, exitCodeSuccess, "mongostat should exit successfully with foobar:foobar");
+
+ x = runMongoProgram.apply(null, args.concat("--password", "wrong"));
+ assert.eq(x, exitCodeErr, "mongostat should exit with an error exit code with foobar:wrong");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_broken_pipe.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_broken_pipe.js
new file mode 100644
index 00000000000..73bd0b01b83
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_broken_pipe.js
@@ -0,0 +1,34 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = getToolTest('stat_broken_pipe');
+ var baseArgs = getCommonToolArguments();
+ baseArgs = baseArgs.concat('--port', toolTest.port);
+
+ if (toolTest.useSSL) {
+ baseArgs = baseArgs.concat([
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslAllowInvalidHostnames']);
+ }
+ var statArgs = ['mongostat', '--rowcount=5'].concat(baseArgs);
+ var ddArgs = ['dd', 'count=1000000', 'bs=1024', 'of=/dev/null'];
+ if (_isWindows()) {
+ statArgs[0] += '.exe';
+ }
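+  // "set -o pipefail" makes the pipeline's exit status reflect mongostat's
+  // own exit code even though dd runs last; PATH=.:$PATH lets bash find the
+  // mongostat binary in the working directory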
+ statArgs.unshift('set -o pipefail', '&&', 'PATH=.:$PATH');
+
+ var ret = runProgram('bash', '-c', statArgs.concat('|', ddArgs).join(' '));
+ assert.eq(0, ret, "bash execution should succeed");
+
+ ddArgs = ['dd', 'count=100', 'bs=1', 'of=/dev/null'];
+ ret = runProgram('bash', '-c', statArgs.concat('|', ddArgs).join(' '));
+ assert.neq(0, ret, "bash execution should fail");
+  assert.soon(function() {
+    return rawMongoProgramOutput().search(/broken pipe|The pipe is being closed/) !== -1;
+  }, 'should print an error message');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_custom_headers.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_custom_headers.js
new file mode 100644
index 00000000000..e105360483a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_custom_headers.js
@@ -0,0 +1,132 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+ load("jstests/libs/extended_assert.js");
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest("stat_custom_headers");
+ var port = toolTest.port;
+
+ var x, rows;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-O", "metrics.record.moves");
+ assert.eq(x, exitCodeBadOptions, "mongostat should fail with both -o and -O options");
+ clearRawMongoProgramOutput();
+
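+  // -o names the exact columns to display, while -O appends extra fields to
+  // the default columns, which is why combining the two flags is rejected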
+ // basic -o --humanReadable=false
+ var expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-n", expectedRowCnt, "--humanReadable=false");
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+
+ assert.eq.soon(expectedRowCnt + 1, function() {
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,time",
+ "first row doesn't match 'host conn time'");
+ assert.eq(statFields(rows[1]).length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ clearRawMongoProgramOutput();
+
+ // basic -o
+ expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-n", expectedRowCnt);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(expectedRowCnt + 1, function() {
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,time",
+ "first row doesn't match 'host conn time'");
+ assert.eq(statFields(rows[1]).length, 5,
+ "there should be exactly five entries for a row of this stat output (time counts as three)");
+ clearRawMongoProgramOutput();
+
+ // basic -O
+ expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-O", "host", "-n", expectedRowCnt);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ var fields = statFields(rows[0]);
+ assert.eq(fields[fields.length-1], "host",
+ "first row should end with added 'host' field");
+ clearRawMongoProgramOutput();
+
+ // named
+ expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host=H,conn=C,time=MYTiME", "-n", expectedRowCnt);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(expectedRowCnt + 1, function() {
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "H,C,MYTiME",
+ "first row doesn't match 'H C MYTiME'");
+ assert.eq(statFields(rows[1]).length, 5,
+ "there should be exactly five entries for a row of this stat output (time counts as three)");
+ clearRawMongoProgramOutput();
+
+ // serverStatus custom field
+ expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,mem.bits", "-n", expectedRowCnt);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(expectedRowCnt + 1, function() {
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,mem.bits",
+    "first row doesn't match 'host conn mem.bits'");
+ fields = statFields(rows[1]);
+ assert.eq(fields.length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ assert(fields[2] === "32" || fields[2] === "64",
+ "mem.bits didn't yield valid output (should be one of 32 or 64, was '"
+ +fields[2]+"')");
+ clearRawMongoProgramOutput();
+
+ // serverStatus named field
+ expectedRowCnt = 4;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn=MYCoNN,mem.bits=BiTs", "-n", expectedRowCnt);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(expectedRowCnt + 1, function() {
+ rows = statRows();
+ if (toolTest.useSSL) {
+ rows = rows.slice(1);
+ }
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,MYCoNN,BiTs",
+    "first row doesn't match 'host MYCoNN BiTs'");
+ fields = statFields(rows[1]);
+ assert.eq(fields.length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ assert(fields[2] === "32" || fields[2] === "64",
+ "mem.bits didn't yield valid output (should be one of 32 or 64, was '"
+ +fields[2]+"')");
+ clearRawMongoProgramOutput();
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover.js
new file mode 100644
index 00000000000..9a818b1f113
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover.js
@@ -0,0 +1,60 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+
+ var toolTest = getToolTest("stat_discover");
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 4,
+ useHostName: true,
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+
+  var worked = statCheck(["mongostat",
+ "--port", rs.liveNodes.master.port,
+ "--discover"],
+ hasOnlyPorts(rs.ports));
+ assert(worked, "when only port is used, each host still only appears once");
+
+ assert(discoverTest(rs.ports, rs.liveNodes.master.host), "--discover against a replset master sees all members");
+
+ assert(discoverTest(rs.ports, rs.liveNodes.slaves[0].host), "--discover against a replset slave sees all members");
+
+  var hosts = [rs.liveNodes.master.host, rs.liveNodes.slaves[0].host, rs.liveNodes.slaves[1].host];
+  var ports = [rs.liveNodes.master.port, rs.liveNodes.slaves[0].port, rs.liveNodes.slaves[1].port];
+ worked = statCheck(['mongostat',
+ '--host', hosts.join(',')],
+ hasOnlyPorts(ports));
+ assert(worked, "replica set specifiers are correctly used");
+
+  assert(discoverTest([toolTest.port], toolTest.m.host), "--discover against a standalone sees just the standalone");
+
+ // Test discovery with nodes cutting in and out
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect("mongostat", "--host", rs.liveNodes.slaves[1].host, "--discover");
+
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "discovered host is seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "specified host is seen");
+
+ rs.stop(rs.liveNodes.slaves[0]);
+ assert.soon(lacksPort(rs.liveNodes.slaves[0].port), "after discovered host is stopped, it is not seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "after discovered host is stopped, specified host is still seen");
+
+ rs.start(rs.liveNodes.slaves[0]);
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "after discovered is restarted, discovered host is seen again");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "after discovered is restarted, specified host is still seen");
+
+ rs.stop(rs.liveNodes.slaves[1]);
+ assert.soon(lacksPort(rs.liveNodes.slaves[1].port), "after specified host is stopped, specified host is not seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "after specified host is stopped, the discovered host is still seen");
+
+ stopMongoProgramByPid(pid);
+
+ rs.stopSet();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover_shard.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover_shard.js
new file mode 100644
index 00000000000..621cdcdc388
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_discover_shard.js
@@ -0,0 +1,14 @@
+(function() {
+ load("jstests/libs/mongostat.js");
+
+ var st = new ShardingTest({name: "shard1", shards: 2});
+  var shardPorts = [st._mongos[0].port, st._connections[0].port, st._connections[1].port];
+
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect("mongostat", "--host", st._mongos[0].host, "--discover");
+ assert.soon(hasOnlyPorts(shardPorts), "--discover against a mongos sees all shards");
+
+ st.stop();
+ assert.soon(hasOnlyPorts([]), "stops showing data when hosts come down");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "mongostat --discover against a sharded cluster shouldn't error when the cluster goes down");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_header.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_header.js
new file mode 100644
index 00000000000..e9fe39957d5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_header.js
@@ -0,0 +1,27 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/mongostat.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('stat_header');
+
+ function outputIncludesHeader() {
+ return rawMongoProgramOutput()
+ .split("\n").some(function(line) {
+ return line.match(/^sh\d+\| insert/);
+ });
+ }
+
+ clearRawMongoProgramOutput();
+  var x = runMongoProgram("mongostat", "--port", toolTest.port, "--rowcount", 1);
+ assert.soon(outputIncludesHeader, "normally a header appears");
+
+ clearRawMongoProgramOutput();
+ x = runMongoProgram("mongostat", "--port", toolTest.port, "--rowcount", 1, "--noheaders");
+ assert.eq.soon(false, outputIncludesHeader, "--noheaders suppresses the header");
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js
new file mode 100644
index 00000000000..56c5a10d30d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js
@@ -0,0 +1,45 @@
+// @tags: [requires_mmap_available]
+(function() {
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+ load("jstests/libs/mongostat.js");
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var mmap_options = {storageEngine: "mmapv1"};
+ var wt_options = {storageEngine: "wiredTiger"};
+ var replTest = new ReplSetTest({
+ nodes: {
+ node0: mmap_options,
+ node1: mmap_options,
+ node2: wt_options,
+ },
+ });
+
+ replTest.startSet();
+ replTest.initiate();
+ replTest.awaitReplication();
+
+ clearRawMongoProgramOutput();
+  assert(discoverTest(replTest.ports, replTest.nodes[0].host), "mongostat against a heterogeneous storage engine replica set sees all hosts");
+
+ clearRawMongoProgramOutput();
+ runMongoProgram("mongostat", "--host", replTest.nodes[0].host, "--rowcount", 7, "--discover");
+ assert.strContains.soon("used flushes mapped", rawMongoProgramOutput, "against replset has fields for both engines");
+
+ replTest.stopSet();
+
+  var st = new ShardingTest({shards: [wt_options, mmap_options], options: {nopreallocj: true}});
+  var stdb = st.getDB("test");
+  var shardPorts = [st._mongos[0].port, st._connections[0].port, st._connections[1].port];
+
+ clearRawMongoProgramOutput();
+  assert(discoverTest(shardPorts, st._mongos[0].host), "mongostat reports on a heterogeneous storage engine sharded cluster");
+
+ clearRawMongoProgramOutput();
+ runMongoProgram("mongostat", "--host", st._mongos[0].host, "--rowcount", 7, "--discover");
+ assert.strContains.soon("used flushes mapped", rawMongoProgramOutput, "against sharded cluster has fields for both engines");
+
+ st.stop();
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_rowcount.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_rowcount.js
new file mode 100644
index 00000000000..30fc01ea5be
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/stat/stat_rowcount.js
@@ -0,0 +1,60 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+ var commonToolArgs = getCommonToolArguments();
+  print("common tool args");
+ printjson(commonToolArgs);
+
+ var toolTest = getToolTest('stat_rowcount');
+ var x, pid;
+ clearRawMongoProgramOutput();
+
+ x = runMongoProgram("mongostat", "--host", toolTest.m.host, "--rowcount", 7, "--noheaders");
+  var expectedRows = 7;
+  if (toolTest.useSSL) {
+    expectedRows = 8;
+  }
+  assert.eq.soon(expectedRows, function() {
+ return rawMongoProgramOutput().split("\n").filter(function(r) {
+ return r.match(rowRegex);
+ }).length;
+ }, "--rowcount value is respected correctly");
+
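+  // 3 rows with a positional sleep interval of 3 seconds should take at
+  // least ~9 seconds overall, which the duration assertion below checks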
+  var startTime = new Date();
+  x = runMongoProgram("mongostat", "--host", toolTest.m.host, "--rowcount", 3, "--noheaders", 3);
+  var endTime = new Date();
+  var duration = Math.floor((endTime - startTime) / 1000);
+  assert.gte(duration, 9, "sleep time affects the total time to produce a number of results");
+
+ clearRawMongoProgramOutput();
+
+ pid = startMongoProgramNoConnect.apply(null, ["mongostat", "--port", toolTest.port].concat(commonToolArgs));
+ assert.strContains.soon('sh'+pid+'| ', rawMongoProgramOutput, "should produce some output");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "stopping should cause mongostat exit with a 'stopped' code");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--port", toolTest.port - 1, "--rowcount", 1].concat(commonToolArgs));
+  assert.neq(exitCodeSuccess, x, "failure to connect should cause an error exit code");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--rowcount", "-1"].concat(commonToolArgs));
+ assert.eq(exitCodeBadOptions, x, "mongostat --rowcount specified with bad input: negative value");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--rowcount", "foobar"].concat(commonToolArgs));
+ assert.eq(exitCodeBadOptions, x, "mongostat --rowcount specified with bad input: non-numeric value");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--host", "badreplset/127.0.0.1:" + toolTest.port, "--rowcount", 1].concat(commonToolArgs));
+ assert.eq(exitCodeErr, x, "--host used with a replica set string for nodes not in a replica set");
+
+ pid = startMongoProgramNoConnect.apply(null, ["mongostat", "--host", "127.0.0.1:" + toolTest.port].concat(commonToolArgs));
+ assert.strContains.soon('sh'+pid+'| ', rawMongoProgramOutput, "should produce some output");
+
+ MongoRunner.stopMongod(toolTest.port);
+ assert.gte.soon(10, function() {
+ var rows = statRows();
+ return statFields(rows[rows.length - 1]).length;
+ }, "should stop showing new stat lines");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "mongostat shouldn't error out when the server goes down");
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_json.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_json.js
new file mode 100644
index 00000000000..1828d578191
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_json.js
@@ -0,0 +1,49 @@
+// mongotop_json.js; ensure that running mongotop using the --json flag works as
+// expected
+var testName = 'mongotop_json';
+(function() {
+ jsTest.log('Testing mongotop --json option');
+ load('jstests/top/util/mongotop_common.js');
+ var assert = extendedAssert;
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // ensure tool runs without error with --rowcount = 1
+ var ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 1');
+ assert.eq.soon('object', function() {
+ return typeof JSON.parse(extractJSON(ret.getOutput()));
+ }, 'invalid JSON 1');
+
+ // ensure tool runs without error with --rowcount > 1
+ var rowcount = 5;
+ clearRawMongoProgramOutput();
+ ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', rowcount].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 2');
+ var output;
+ assert.eq.soon(rowcount, function() {
+ output = ret.getOutput().split('\n');
+ if (jsTestOptions().useSSL) {
+ output = output.slice(1);
+ }
+ return output.length;
+ }, "expected " + rowcount + " top results");
+ output.forEach(function(line) {
+ assert(typeof JSON.parse(extractJSON(line)) === 'object', 'invalid JSON 2');
+ });
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_reports.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_reports.js
new file mode 100644
index 00000000000..cb6f986733c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_reports.js
@@ -0,0 +1,152 @@
+// mongotop_reports.js; ensure that running mongotop reports accurately on operations
+// going on in namespaces
+var testName = 'mongotop_reports';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop\'s reporting fidelity');
+ var assert = extendedAssert;
+ var read = 'read';
+ var write = 'write';
+
+ var runReportTest = function(topology, passthrough, test) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough on ' + test.name + ' shell');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ db = conn.getDB('foo'); // eslint-disable-line no-native-reassign
+ db.dropDatabase();
+ assert.eq(db.bar.count(), 0, 'drop failed');
+
+ // start the parallel shell command
+ if (passthrough.name === auth.name) {
+ var authCommand = '\n db.getSiblingDB(\'admin\').auth(\'' + authUser + '\',\'' + authPassword + '\'); \n';
+ test.shellCommand = authCommand + test.shellCommand;
+ }
+ var shellWorkload = startParallelShell(test.shellCommand);
+
+ // allow for command to actually start
+ sleep(5000);
+
+ // ensure tool runs without error
+ clearRawMongoProgramOutput();
+ var ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 1');
+ var parsedOutput;
+ assert.eq.soon('object', function() {
+ parsedOutput = JSON.parse(extractJSON(ret.getOutput()));
+ return typeof parsedOutput;
+ }, 'invalid JSON 1');
+
+    // ensure only the active namespaces report non-zero values
+ for (var namespace in parsedOutput.totals) {
+ if (!parsedOutput.totals.hasOwnProperty(namespace)) {
+ continue;
+ }
+ var isAuthActivity = namespace.indexOf('.system.') !== -1;
+ var isReplActivity = namespace.indexOf('local.') !== -1;
+ var isConfigActivity = namespace.indexOf('config.') !== -1;
+
+ // authentication and replication activity should be ignored
+ if (isAuthActivity || isReplActivity || isConfigActivity) {
+ continue;
+ }
+
+ var nsDetails = parsedOutput.totals[namespace];
+ assert.neq(nsDetails, undefined, 'no details reported for namespace ' + namespace);
+
+ var comparator = 'eq';
+ var shouldHaveActivity = test.namespaces.filter(function(testSpace) { // eslint-disable-line no-loop-func
+ return testSpace === namespace;
+ });
+
+      // flip the comparator if this namespace is expected to have activity
+ if (shouldHaveActivity.length !== 0) {
+ comparator = 'neq';
+ }
+
+ test.indicators.forEach(function(indicator) { // eslint-disable-line no-loop-func
+ ['count', 'time'].forEach(function(metric) {
+ assert[comparator](nsDetails[indicator][metric], 0, 'unexpected ' + indicator + ' activity on ' + namespace + '; ' + metric + ': ' + nsDetails[indicator][metric]);
+ if (test.indicators.length === 1) {
+ // read or write shell
+ var opposite = read;
+ if (test.name === read) {
+ opposite = write;
+ }
+ // ensure there's no activity on the inactive metric
+ // sometimes the readings are a bit out of sync - making some
+ // allowance to prevent test flakiness
+ assert.between(0, nsDetails[opposite][metric], 1, 'unexpected ' + opposite + ' (opposite) activity on ' + namespace + '; ' + metric + ': ' + nsDetails[opposite][metric]);
+ } else {
+ // read/write shell should have read and write activity
+ assert[comparator](nsDetails[read][metric], 0, 'unexpected ' + read + ' activity (read/write) on ' + namespace + '; ' + metric + ': ' + nsDetails[read][metric]);
+ assert[comparator](nsDetails[write][metric], 0, 'unexpected ' + write + ' activity (read/write) on ' + namespace + '; ' + metric + ': ' + nsDetails[write][metric]);
+ }
+ var calculatedSum = nsDetails[read][metric] + nsDetails[write][metric];
+ var expectedSum = nsDetails['total'][metric];
+
+ // sometimes the total isn't exact - making some allowance to prevent
+ // test flakiness
+ assert.between(0, expectedSum - calculatedSum, 1, 'unexpected sum for metric ' + metric + ': expected ' + expectedSum + ' but got ' + calculatedSum);
+ });
+ });
+ }
+ t.stop();
+
+ // Swallow the exit code for the shell per SERVER-25777.
+ shellWorkload();
+ };
+
+ var runTests = function(topology, passthrough) {
+ var readShell = '\nprint(\'starting read\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) \n{ ' +
+ ' db.getSiblingDB(\'foo\').bar.find({ x: i }).forEach(function(){}); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var writeShell = '\nprint(\'starting write\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) { \n' +
+ ' db.getSiblingDB(\'foo\').bar.insert({ x: i }); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var readWriteShell = '\nprint(\'starting read/write\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) \n{ ' +
+ ' db.getSiblingDB(\'foo\').bar.insert({ x: i }); \n' +
+ ' db.getSiblingDB(\'foo\').bar.find({ x: i }).forEach(function(){}); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var testSpaces = [
+ ['foo.bar'],
+ ['foo.bar', 'bar.foo'],
+ ];
+
+ var tests = [{
+ name: read,
+ indicators: [read],
+ shellCommand: readShell,
+ }, {
+ name: write,
+ indicators: [write],
+ shellCommand: writeShell,
+ }, {
+ name: read + '/' + write,
+ indicators: [read, write],
+ shellCommand: readWriteShell,
+ }];
+
+ tests.forEach(function(test) {
+ testSpaces.forEach(function(testSpace) {
+ test.namespaces = testSpace;
+ runReportTest(topology, passthrough, test);
+ });
+ });
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_sharded.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_sharded.js
new file mode 100644
index 00000000000..448a19530db
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_sharded.js
@@ -0,0 +1,47 @@
+// mongotop_sharded.js; ensure that running mongotop against a sharded cluster
+// fails with a useful error message
+var testName = 'mongotop_sharded';
+(function() {
+ jsTest.log('Testing mongotop against sharded cluster');
+ load('jstests/top/util/mongotop_common.js');
+ var assert = extendedAssert;
+
+ var expectedError = 'cannot run mongotop against a mongos';
+ var verifyOutput = function(getOutput) {
+ assert.strContains.soon(expectedError, getOutput, 'error message must appear at least once');
+ var shellOutput = getOutput();
+ jsTest.log('shell output: ' + shellOutput);
+ shellOutput.split('\n').slice(1).forEach(function(line) {
+ // check the displayed error message
+ assert.neq(line.match(expectedError), null, 'unexpected error message');
+ });
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // getting the version should work without error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--version'].concat(passthrough.args)), 0, 'failed 1');
+
+ // getting the help text should work without error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--help'].concat(passthrough.args)), 0, 'failed 2');
+
+ // anything that runs against the mongos server should fail
+ var result = executeProgram(['mongotop', '--port', conn.port].concat(passthrough.args));
+ assert.neq(result.exitCode, 0, 'expected failure against a mongos');
+ verifyOutput(result.getOutput);
+
+ result = executeProgram(['mongotop', '--port', conn.port, '2'].concat(passthrough.args));
+ assert.neq(result.exitCode, 0, 'expected failure against a mongos');
+ verifyOutput(result.getOutput);
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_stress.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_stress.js
new file mode 100644
index 00000000000..5693a1b8254
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_stress.js
@@ -0,0 +1,56 @@
+// mongotop_stress.js; ensure that running mongotop, even when the server is
+// under heavy load, works as expected
+var testName = 'mongotop_stress';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop\'s performance under load');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ db = conn.getDB('foo'); // eslint-disable-line no-native-reassign
+
+ // concurrently insert documents into thousands of collections
+ var stressShell = '\nprint(\'starting read/write stress test\'); \n' +
+ ' if (\'' + passthrough.name + '\' === \'auth\')' +
+ ' db.getSiblingDB(\'admin\').auth(\'' + authUser + '\',\'' + authPassword + '\'); ' +
+ ' var dbName = (Math.random() + 1).toString(36).substring(7); ' +
+ ' var clName = (Math.random() + 1).toString(36).substring(7); ' +
+ ' for (var i = 0; i < 10000; ++i) { ' +
+      '    db.getSiblingDB(dbName).getCollection(clName).find({ x: i }).forEach(function() {}); \n' +
+ ' sleep(1); \n' +
+ ' db.getSiblingDB(dbName).getCollection(clName).insert({ x: i }); \n' +
+ ' sleep(1);\n' +
+ ' }\n';
+
+ var shells = [];
+ for (var i = 0; i < 10; ++i) {
+ shells.push(startParallelShell(stressShell));
+ }
+
+ // wait a bit for the stress to kick in
+ sleep(5000);
+ jsTest.log('Current operation(s)');
+ printjson(db.currentOp());
+
+ // ensure tool runs without error
+ clearRawMongoProgramOutput();
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args)), 0, 'failed 1');
+
+ t.stop();
+
+ // Wait for all the shells to finish per SERVER-25777.
+ shells.forEach(function(join) {
+ join();
+ });
+
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_validation.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_validation.js
new file mode 100644
index 00000000000..0abe5a4c74b
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/mongotop_validation.js
@@ -0,0 +1,46 @@
+// mongotop_validation.js; ensure that running mongotop with invalid arguments
+// fails as expected
+var testName = 'mongotop_validation';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop with invalid arguments');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // checking the version should not return an error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--version'].concat(passthrough.args)), 0, '--version assertion failure 1');
+
+
+ // ensure tool returns an error...
+
+ // when used with an invalid port
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', 55555].concat(passthrough.args)), 0, '--port assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', 'hello'].concat(passthrough.args)), 0, '--port assertion failure 2');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', ''].concat(passthrough.args)), 0, '--port assertion failure 3');
+
+ // when supplied invalid row counts
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', '-2'].concat(passthrough.args)), 0, '--rowcount assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', 'hello'].concat(passthrough.args)), 0, '--rowcount assertion failure 2');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', ''].concat(passthrough.args)), 0, '--rowcount assertion failure 3');
+
+ // when supplied invalid sleep times
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '-4'].concat(passthrough.args)), 0, 'sleep time assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, 'forever'].concat(passthrough.args)), 0, 'sleep time assertion failure 2');
+
+ // when supplied invalid options
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--elder'].concat(passthrough.args)), 0, 'invalid options failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--price'].concat(passthrough.args)), 0, 'invalid options failure 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/util/mongotop_common.js b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/util/mongotop_common.js
new file mode 100644
index 00000000000..5d4e002fb92
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/test/qa-tests/jstests/top/util/mongotop_common.js
@@ -0,0 +1,25 @@
+// mongotop_common.js; contains variables used by mongotop tests
+/* exported executeProgram */
+/* exported extractJSON */
+load('jstests/common/topology_helper.js');
+load('jstests/libs/extended_assert.js');
+
+var executeProgram = function(args) {
+ clearRawMongoProgramOutput();
+ var pid = startMongoProgramNoConnect.apply(this, args);
+ var exitCode = waitProgram(pid);
+ var prefix = 'sh'+pid+'| ';
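+  // each line a spawned program writes is prefixed with 'sh<pid>| ' in the
+  // shared raw output log, so filter on that prefix to isolate this run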
+ var getOutput = function() {
+ return rawMongoProgramOutput().split('\n').filter(function(line) {
+ return line.indexOf(prefix) === 0;
+ }).join('\n');
+ };
+ return {
+ exitCode: exitCode,
+ getOutput: getOutput,
+ };
+};
+
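+// extract the JSON payload from a line of shell output by slicing from the
+// first '{' through the last '}', e.g. extractJSON('sh99| {"a": 1} x') === '{"a": 1}'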
+var extractJSON = function(shellOutput) {
+ return shellOutput.substring(shellOutput.indexOf('{'), shellOutput.lastIndexOf('}') + 1);
+};
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/.travis.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 00000000000..9159de03e03
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/LICENSE b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 00000000000..835ba3e755c
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/Makefile b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000000..ce9d7cded64
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/README.md b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 00000000000..54dfdcb12ea
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which, when applied recursively up the call stack, results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/appveyor.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 00000000000..a932eade024
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/bench_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/bench_test.go
new file mode 100644
index 00000000000..c906870e0c2
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/bench_test.go
@@ -0,0 +1,110 @@
+// +build go1.7
+
+package errors
+
+import (
+ "fmt"
+ "testing"
+
+ stderrors "errors"
+)
+
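+// noErrors and yesErrors recurse until the call stack is `depth` frames deep
+// before constructing an error, so the benchmarks measure error creation and
+// stack capture at realistic stack depths.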
+func noErrors(at, depth int) error {
+ if at >= depth {
+ return stderrors.New("no error")
+ }
+ return noErrors(at+1, depth)
+}
+
+func yesErrors(at, depth int) error {
+ if at >= depth {
+ return New("ye error")
+ }
+ return yesErrors(at+1, depth)
+}
+
+// GlobalE is an exported global used to store benchmark results, preventing
+// the compiler from optimising the benchmark functions away.
+var GlobalE interface{}
+
+func BenchmarkErrors(b *testing.B) {
+ type run struct {
+ stack int
+ std bool
+ }
+ runs := []run{
+ {10, false},
+ {10, true},
+ {100, false},
+ {100, true},
+ {1000, false},
+ {1000, true},
+ }
+ for _, r := range runs {
+ part := "pkg/errors"
+ if r.std {
+ part = "errors"
+ }
+ name := fmt.Sprintf("%s-stack-%d", part, r.stack)
+ b.Run(name, func(b *testing.B) {
+ var err error
+ f := yesErrors
+ if r.std {
+ f = noErrors
+ }
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ err = f(0, r.stack)
+ }
+ b.StopTimer()
+ GlobalE = err
+ })
+ }
+}
+
+func BenchmarkStackFormatting(b *testing.B) {
+ type run struct {
+ stack int
+ format string
+ }
+ runs := []run{
+ {10, "%s"},
+ {10, "%v"},
+ {10, "%+v"},
+ {30, "%s"},
+ {30, "%v"},
+ {30, "%+v"},
+ {60, "%s"},
+ {60, "%v"},
+ {60, "%+v"},
+ }
+
+ var stackStr string
+ for _, r := range runs {
+ name := fmt.Sprintf("%s-stack-%d", r.format, r.stack)
+ b.Run(name, func(b *testing.B) {
+ err := yesErrors(0, r.stack)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ stackStr = fmt.Sprintf(r.format, err)
+ }
+ b.StopTimer()
+ })
+ }
+
+ for _, r := range runs {
+ name := fmt.Sprintf("%s-stacktrace-%d", r.format, r.stack)
+ b.Run(name, func(b *testing.B) {
+ err := yesErrors(0, r.stack)
+ st := err.(*fundamental).stack.StackTrace()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ stackStr = fmt.Sprintf(r.format, st)
+ }
+ b.StopTimer()
+ })
+ }
+ GlobalE = stackStr
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 00000000000..161aea25829
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
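+//
+// For example (an illustrative sketch):
+//
+//     err := errors.Wrap(io.EOF, "read failed")
+//     fmt.Printf("%+v\n", err) // message, cause, and the recorded stack trace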
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the message produced by the format
+// specifier and its arguments.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the message produced by the format
+// specifier and its arguments.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
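+//
+// For example:
+//
+// err := errors.Wrap(io.EOF, "read failed")
+// errors.Cause(err) == io.EOF // true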
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors_test.go
new file mode 100644
index 00000000000..2089b2f762d
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/errors_test.go
@@ -0,0 +1,251 @@
+package errors
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "testing"
+)
+
+func TestNew(t *testing.T) {
+ tests := []struct {
+ err string
+ want error
+ }{
+ {"", fmt.Errorf("")},
+ {"foo", fmt.Errorf("foo")},
+ {"foo", New("foo")},
+ {"string with format specifiers: %v", errors.New("string with format specifiers: %v")},
+ }
+
+ for _, tt := range tests {
+ got := New(tt.err)
+ if got.Error() != tt.want.Error() {
+ t.Errorf("New.Error(): got: %q, want %q", got, tt.want)
+ }
+ }
+}
+
+func TestWrapNil(t *testing.T) {
+ got := Wrap(nil, "no error")
+ if got != nil {
+ t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got)
+ }
+}
+
+func TestWrap(t *testing.T) {
+ tests := []struct {
+ err error
+ message string
+ want string
+ }{
+ {io.EOF, "read error", "read error: EOF"},
+ {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"},
+ }
+
+ for _, tt := range tests {
+ got := Wrap(tt.err, tt.message).Error()
+ if got != tt.want {
+ t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
+ }
+ }
+}
+
+type nilError struct{}
+
+func (nilError) Error() string { return "nil error" }
+
+func TestCause(t *testing.T) {
+ x := New("error")
+ tests := []struct {
+ err error
+ want error
+ }{{
+ // nil error is nil
+ err: nil,
+ want: nil,
+ }, {
+ // explicit nil error is nil
+ err: (error)(nil),
+ want: nil,
+ }, {
+ // typed nil is nil
+ err: (*nilError)(nil),
+ want: (*nilError)(nil),
+ }, {
+ // uncaused error is unaffected
+ err: io.EOF,
+ want: io.EOF,
+ }, {
+ // caused error returns cause
+ err: Wrap(io.EOF, "ignored"),
+ want: io.EOF,
+ }, {
+ err: x, // return from errors.New
+ want: x,
+ }, {
+ WithMessage(nil, "whoops"),
+ nil,
+ }, {
+ WithMessage(io.EOF, "whoops"),
+ io.EOF,
+ }, {
+ WithStack(nil),
+ nil,
+ }, {
+ WithStack(io.EOF),
+ io.EOF,
+ }}
+
+ for i, tt := range tests {
+ got := Cause(tt.err)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want)
+ }
+ }
+}
+
+func TestWrapfNil(t *testing.T) {
+ got := Wrapf(nil, "no error")
+ if got != nil {
+ t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got)
+ }
+}
+
+func TestWrapf(t *testing.T) {
+ tests := []struct {
+ err error
+ message string
+ want string
+ }{
+ {io.EOF, "read error", "read error: EOF"},
+ {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"},
+ {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"},
+ }
+
+ for _, tt := range tests {
+ got := Wrapf(tt.err, tt.message).Error()
+ if got != tt.want {
+ t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
+ }
+ }
+}
+
+func TestErrorf(t *testing.T) {
+ tests := []struct {
+ err error
+ want string
+ }{
+ {Errorf("read error without format specifiers"), "read error without format specifiers"},
+ {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"},
+ }
+
+ for _, tt := range tests {
+ got := tt.err.Error()
+ if got != tt.want {
+ t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want)
+ }
+ }
+}
+
+func TestWithStackNil(t *testing.T) {
+ got := WithStack(nil)
+ if got != nil {
+ t.Errorf("WithStack(nil): got %#v, expected nil", got)
+ }
+}
+
+func TestWithStack(t *testing.T) {
+ tests := []struct {
+ err error
+ want string
+ }{
+ {io.EOF, "EOF"},
+ {WithStack(io.EOF), "EOF"},
+ }
+
+ for _, tt := range tests {
+ got := WithStack(tt.err).Error()
+ if got != tt.want {
+ t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want)
+ }
+ }
+}
+
+func TestWithMessageNil(t *testing.T) {
+ got := WithMessage(nil, "no error")
+ if got != nil {
+ t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got)
+ }
+}
+
+func TestWithMessage(t *testing.T) {
+ tests := []struct {
+ err error
+ message string
+ want string
+ }{
+ {io.EOF, "read error", "read error: EOF"},
+ {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"},
+ }
+
+ for _, tt := range tests {
+ got := WithMessage(tt.err, tt.message).Error()
+ if got != tt.want {
+ t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
+ }
+ }
+}
+
+func TestWithMessagefNil(t *testing.T) {
+ got := WithMessagef(nil, "no error")
+ if got != nil {
+ t.Errorf("WithMessagef(nil, \"no error\"): got %#v, expected nil", got)
+ }
+}
+
+func TestWithMessagef(t *testing.T) {
+ tests := []struct {
+ err error
+ message string
+ want string
+ }{
+ {io.EOF, "read error", "read error: EOF"},
+ {WithMessagef(io.EOF, "read error without format specifier"), "client error", "client error: read error without format specifier: EOF"},
+ {WithMessagef(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"},
+ }
+
+ for _, tt := range tests {
+ got := WithMessagef(tt.err, tt.message).Error()
+ if got != tt.want {
+ t.Errorf("WithMessagef(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
+ }
+ }
+}
+
+// Values from errors.New and friends are not expected to be compared by
+// value, but the change in errors#27 made them incomparable. Assert that
+// various kinds of errors have a functional equality operator, even if
+// the result of that equality is always false.
+func TestErrorEquality(t *testing.T) {
+ vals := []error{
+ nil,
+ io.EOF,
+ errors.New("EOF"),
+ New("EOF"),
+ Errorf("EOF"),
+ Wrap(io.EOF, "EOF"),
+ Wrapf(io.EOF, "EOF%d", 2),
+ WithMessage(nil, "whoops"),
+ WithMessage(io.EOF, "whoops"),
+ WithStack(io.EOF),
+ WithStack(nil),
+ }
+
+ for i := range vals {
+ for j := range vals {
+ _ = vals[i] == vals[j] // mustn't panic
+ }
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/example_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/example_test.go
new file mode 100644
index 00000000000..7d0e286f4bb
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/example_test.go
@@ -0,0 +1,205 @@
+package errors_test
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+func ExampleNew() {
+ err := errors.New("whoops")
+ fmt.Println(err)
+
+ // Output: whoops
+}
+
+func ExampleNew_printf() {
+ err := errors.New("whoops")
+ fmt.Printf("%+v", err)
+
+ // Example output:
+ // whoops
+ // github.com/pkg/errors_test.ExampleNew_printf
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:17
+ // testing.runExample
+ // /home/dfc/go/src/testing/example.go:114
+ // testing.RunExamples
+ // /home/dfc/go/src/testing/example.go:38
+ // testing.(*M).Run
+ // /home/dfc/go/src/testing/testing.go:744
+ // main.main
+ // /github.com/pkg/errors/_test/_testmain.go:106
+ // runtime.main
+ // /home/dfc/go/src/runtime/proc.go:183
+ // runtime.goexit
+ // /home/dfc/go/src/runtime/asm_amd64.s:2059
+}
+
+func ExampleWithMessage() {
+ cause := errors.New("whoops")
+ err := errors.WithMessage(cause, "oh noes")
+ fmt.Println(err)
+
+ // Output: oh noes: whoops
+}
+
+func ExampleWithStack() {
+ cause := errors.New("whoops")
+ err := errors.WithStack(cause)
+ fmt.Println(err)
+
+ // Output: whoops
+}
+
+func ExampleWithStack_printf() {
+ cause := errors.New("whoops")
+ err := errors.WithStack(cause)
+ fmt.Printf("%+v", err)
+
+ // Example output:
+ // whoops
+ // github.com/pkg/errors_test.ExampleWithStack_printf
+ // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55
+ // testing.runExample
+ // /usr/lib/go/src/testing/example.go:114
+ // testing.RunExamples
+ // /usr/lib/go/src/testing/example.go:38
+ // testing.(*M).Run
+ // /usr/lib/go/src/testing/testing.go:744
+ // main.main
+ // github.com/pkg/errors/_test/_testmain.go:106
+ // runtime.main
+ // /usr/lib/go/src/runtime/proc.go:183
+ // runtime.goexit
+ // /usr/lib/go/src/runtime/asm_amd64.s:2086
+ // github.com/pkg/errors_test.ExampleWithStack_printf
+ // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56
+ // testing.runExample
+ // /usr/lib/go/src/testing/example.go:114
+ // testing.RunExamples
+ // /usr/lib/go/src/testing/example.go:38
+ // testing.(*M).Run
+ // /usr/lib/go/src/testing/testing.go:744
+ // main.main
+ // github.com/pkg/errors/_test/_testmain.go:106
+ // runtime.main
+ // /usr/lib/go/src/runtime/proc.go:183
+ // runtime.goexit
+ // /usr/lib/go/src/runtime/asm_amd64.s:2086
+}
+
+func ExampleWrap() {
+ cause := errors.New("whoops")
+ err := errors.Wrap(cause, "oh noes")
+ fmt.Println(err)
+
+ // Output: oh noes: whoops
+}
+
+func fn() error {
+ e1 := errors.New("error")
+ e2 := errors.Wrap(e1, "inner")
+ e3 := errors.Wrap(e2, "middle")
+ return errors.Wrap(e3, "outer")
+}
+
+func ExampleCause() {
+ err := fn()
+ fmt.Println(err)
+ fmt.Println(errors.Cause(err))
+
+ // Output: outer: middle: inner: error
+ // error
+}
+
+func ExampleWrap_extended() {
+ err := fn()
+ fmt.Printf("%+v\n", err)
+
+ // Example output:
+ // error
+ // github.com/pkg/errors_test.fn
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:47
+ // github.com/pkg/errors_test.ExampleCause_printf
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:63
+ // testing.runExample
+ // /home/dfc/go/src/testing/example.go:114
+ // testing.RunExamples
+ // /home/dfc/go/src/testing/example.go:38
+ // testing.(*M).Run
+ // /home/dfc/go/src/testing/testing.go:744
+ // main.main
+ // /github.com/pkg/errors/_test/_testmain.go:104
+ // runtime.main
+ // /home/dfc/go/src/runtime/proc.go:183
+ // runtime.goexit
+ // /home/dfc/go/src/runtime/asm_amd64.s:2059
+ // github.com/pkg/errors_test.fn
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner
+ // github.com/pkg/errors_test.fn
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle
+ // github.com/pkg/errors_test.fn
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer
+}
+
+func ExampleWrapf() {
+ cause := errors.New("whoops")
+ err := errors.Wrapf(cause, "oh noes #%d", 2)
+ fmt.Println(err)
+
+ // Output: oh noes #2: whoops
+}
+
+func ExampleErrorf_extended() {
+ err := errors.Errorf("whoops: %s", "foo")
+ fmt.Printf("%+v", err)
+
+ // Example output:
+ // whoops: foo
+ // github.com/pkg/errors_test.ExampleErrorf
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:101
+ // testing.runExample
+ // /home/dfc/go/src/testing/example.go:114
+ // testing.RunExamples
+ // /home/dfc/go/src/testing/example.go:38
+ // testing.(*M).Run
+ // /home/dfc/go/src/testing/testing.go:744
+ // main.main
+ // /github.com/pkg/errors/_test/_testmain.go:102
+ // runtime.main
+ // /home/dfc/go/src/runtime/proc.go:183
+ // runtime.goexit
+ // /home/dfc/go/src/runtime/asm_amd64.s:2059
+}
+
+func Example_stackTrace() {
+ type stackTracer interface {
+ StackTrace() errors.StackTrace
+ }
+
+ err, ok := errors.Cause(fn()).(stackTracer)
+ if !ok {
+ panic("oops, err does not implement stackTracer")
+ }
+
+ st := err.StackTrace()
+ fmt.Printf("%+v", st[0:2]) // top two frames
+
+ // Example output:
+ // github.com/pkg/errors_test.fn
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:47
+ // github.com/pkg/errors_test.Example_stackTrace
+ // /home/dfc/src/github.com/pkg/errors/example_test.go:127
+}
+
+func ExampleCause_printf() {
+ err := errors.Wrap(func() error {
+ return func() error {
+ return errors.New("hello world")
+ }()
+ }(), "failed")
+
+ fmt.Printf("%v", err)
+
+ // Output: failed: hello world
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/format_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/format_test.go
new file mode 100644
index 00000000000..cb1df821fcf
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/format_test.go
@@ -0,0 +1,560 @@
+package errors
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestFormatNew(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want string
+ }{{
+ New("error"),
+ "%s",
+ "error",
+ }, {
+ New("error"),
+ "%v",
+ "error",
+ }, {
+ New("error"),
+ "%+v",
+ "error\n" +
+ "github.com/pkg/errors.TestFormatNew\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:26",
+ }, {
+ New("error"),
+ "%q",
+ `"error"`,
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.error, tt.format, tt.want)
+ }
+}
+
+func TestFormatErrorf(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want string
+ }{{
+ Errorf("%s", "error"),
+ "%s",
+ "error",
+ }, {
+ Errorf("%s", "error"),
+ "%v",
+ "error",
+ }, {
+ Errorf("%s", "error"),
+ "%+v",
+ "error\n" +
+ "github.com/pkg/errors.TestFormatErrorf\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:56",
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.error, tt.format, tt.want)
+ }
+}
+
+func TestFormatWrap(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want string
+ }{{
+ Wrap(New("error"), "error2"),
+ "%s",
+ "error2: error",
+ }, {
+ Wrap(New("error"), "error2"),
+ "%v",
+ "error2: error",
+ }, {
+ Wrap(New("error"), "error2"),
+ "%+v",
+ "error\n" +
+ "github.com/pkg/errors.TestFormatWrap\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:82",
+ }, {
+ Wrap(io.EOF, "error"),
+ "%s",
+ "error: EOF",
+ }, {
+ Wrap(io.EOF, "error"),
+ "%v",
+ "error: EOF",
+ }, {
+ Wrap(io.EOF, "error"),
+ "%+v",
+ "EOF\n" +
+ "error\n" +
+ "github.com/pkg/errors.TestFormatWrap\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:96",
+ }, {
+ Wrap(Wrap(io.EOF, "error1"), "error2"),
+ "%+v",
+ "EOF\n" +
+ "error1\n" +
+ "github.com/pkg/errors.TestFormatWrap\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:103\n",
+ }, {
+ Wrap(New("error with space"), "context"),
+ "%q",
+ `"context: error with space"`,
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.error, tt.format, tt.want)
+ }
+}
+
+func TestFormatWrapf(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want string
+ }{{
+ Wrapf(io.EOF, "error%d", 2),
+ "%s",
+ "error2: EOF",
+ }, {
+ Wrapf(io.EOF, "error%d", 2),
+ "%v",
+ "error2: EOF",
+ }, {
+ Wrapf(io.EOF, "error%d", 2),
+ "%+v",
+ "EOF\n" +
+ "error2\n" +
+ "github.com/pkg/errors.TestFormatWrapf\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:134",
+ }, {
+ Wrapf(New("error"), "error%d", 2),
+ "%s",
+ "error2: error",
+ }, {
+ Wrapf(New("error"), "error%d", 2),
+ "%v",
+ "error2: error",
+ }, {
+ Wrapf(New("error"), "error%d", 2),
+ "%+v",
+ "error\n" +
+ "github.com/pkg/errors.TestFormatWrapf\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:149",
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.error, tt.format, tt.want)
+ }
+}
+
+func TestFormatWithStack(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want []string
+ }{{
+ WithStack(io.EOF),
+ "%s",
+ []string{"EOF"},
+ }, {
+ WithStack(io.EOF),
+ "%v",
+ []string{"EOF"},
+ }, {
+ WithStack(io.EOF),
+ "%+v",
+ []string{"EOF",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:175"},
+ }, {
+ WithStack(New("error")),
+ "%s",
+ []string{"error"},
+ }, {
+ WithStack(New("error")),
+ "%v",
+ []string{"error"},
+ }, {
+ WithStack(New("error")),
+ "%+v",
+ []string{"error",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:189",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:189"},
+ }, {
+ WithStack(WithStack(io.EOF)),
+ "%+v",
+ []string{"EOF",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:197",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:197"},
+ }, {
+ WithStack(WithStack(Wrapf(io.EOF, "message"))),
+ "%+v",
+ []string{"EOF",
+ "message",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:205",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:205",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:205"},
+ }, {
+ WithStack(Errorf("error%d", 1)),
+ "%+v",
+ []string{"error1",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:216",
+ "github.com/pkg/errors.TestFormatWithStack\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:216"},
+ }}
+
+ for i, tt := range tests {
+ testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
+ }
+}
+
+func TestFormatWithMessage(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want []string
+ }{{
+ WithMessage(New("error"), "error2"),
+ "%s",
+ []string{"error2: error"},
+ }, {
+ WithMessage(New("error"), "error2"),
+ "%v",
+ []string{"error2: error"},
+ }, {
+ WithMessage(New("error"), "error2"),
+ "%+v",
+ []string{
+ "error",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:244",
+ "error2"},
+ }, {
+ WithMessage(io.EOF, "addition1"),
+ "%s",
+ []string{"addition1: EOF"},
+ }, {
+ WithMessage(io.EOF, "addition1"),
+ "%v",
+ []string{"addition1: EOF"},
+ }, {
+ WithMessage(io.EOF, "addition1"),
+ "%+v",
+ []string{"EOF", "addition1"},
+ }, {
+ WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
+ "%v",
+ []string{"addition2: addition1: EOF"},
+ }, {
+ WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
+ "%+v",
+ []string{"EOF", "addition1", "addition2"},
+ }, {
+ Wrap(WithMessage(io.EOF, "error1"), "error2"),
+ "%+v",
+ []string{"EOF", "error1", "error2",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:272"},
+ }, {
+ WithMessage(Errorf("error%d", 1), "error2"),
+ "%+v",
+ []string{"error1",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:278",
+ "error2"},
+ }, {
+ WithMessage(WithStack(io.EOF), "error"),
+ "%+v",
+ []string{
+ "EOF",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:285",
+ "error"},
+ }, {
+ WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"),
+ "%+v",
+ []string{
+ "EOF",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:293",
+ "inside-error",
+ "github.com/pkg/errors.TestFormatWithMessage\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:293",
+ "outside-error"},
+ }}
+
+ for i, tt := range tests {
+ testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
+ }
+}
+
+func TestFormatGeneric(t *testing.T) {
+ starts := []struct {
+ err error
+ want []string
+ }{
+ {New("new-error"), []string{
+ "new-error",
+ "github.com/pkg/errors.TestFormatGeneric\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:315"},
+ }, {Errorf("errorf-error"), []string{
+ "errorf-error",
+ "github.com/pkg/errors.TestFormatGeneric\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:319"},
+ }, {errors.New("errors-new-error"), []string{
+ "errors-new-error"},
+ },
+ }
+
+ wrappers := []wrapper{
+ {
+ func(err error) error { return WithMessage(err, "with-message") },
+ []string{"with-message"},
+ }, {
+ func(err error) error { return WithStack(err) },
+ []string{
+ "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" +
+ ".+/github.com/pkg/errors/format_test.go:333",
+ },
+ }, {
+ func(err error) error { return Wrap(err, "wrap-error") },
+ []string{
+ "wrap-error",
+ "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" +
+ ".+/github.com/pkg/errors/format_test.go:339",
+ },
+ }, {
+ func(err error) error { return Wrapf(err, "wrapf-error%d", 1) },
+ []string{
+ "wrapf-error1",
+ "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" +
+ ".+/github.com/pkg/errors/format_test.go:346",
+ },
+ },
+ }
+
+ for s := range starts {
+ err := starts[s].err
+ want := starts[s].want
+ testFormatCompleteCompare(t, s, err, "%+v", want, false)
+ testGenericRecursive(t, err, want, wrappers, 3)
+ }
+}
+
+func wrappedNew(message string) error { // This function will be mid-stack inlined in Go 1.12+
+ return New(message)
+}
+
+func TestFormatWrappedNew(t *testing.T) {
+ tests := []struct {
+ error
+ format string
+ want string
+ }{{
+ wrappedNew("error"),
+ "%+v",
+ "error\n" +
+ "github.com/pkg/errors.wrappedNew\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:364\n" +
+ "github.com/pkg/errors.TestFormatWrappedNew\n" +
+ "\t.+/github.com/pkg/errors/format_test.go:373",
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.error, tt.format, tt.want)
+ }
+}
+
+func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
+ t.Helper()
+ got := fmt.Sprintf(format, arg)
+ gotLines := strings.Split(got, "\n")
+ wantLines := strings.Split(want, "\n")
+
+ if len(wantLines) > len(gotLines) {
+ t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
+ return
+ }
+
+ for i, w := range wantLines {
+ match, err := regexp.MatchString(w, gotLines[i])
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !match {
+ t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
+ }
+ }
+}
+
+// stackLineR matches lines containing a dot; in these tests only stack
+// trace lines (function names and file paths) contain one.
+var stackLineR = regexp.MustCompile(`\.`)
+
+// parseBlocks parses input into a slice, where:
+// - if an entry contains a newline, it is a stack trace
+// - if an entry contains no newline, it is a single line.
+//
+// Detecting stack boundaries only works if the WithStack calls appear on
+// the same line, which is why it is optional here.
+//
+// Example use:
+//
+// for _, e := range blocks {
+// if strings.ContainsAny(e, "\n") {
+// // Match as stack
+// } else {
+// // Match as line
+// }
+// }
+//
+func parseBlocks(input string, detectStackBoundaries bool) ([]string, error) {
+ var blocks []string
+
+ stack := ""
+ wasStack := false
+ lines := map[string]bool{} // already found lines
+
+ for _, l := range strings.Split(input, "\n") {
+ isStackLine := stackLineR.MatchString(l)
+
+ switch {
+ case !isStackLine && wasStack:
+ blocks = append(blocks, stack, l)
+ stack = ""
+ lines = map[string]bool{}
+ case isStackLine:
+ if wasStack {
+ // Two stacks can appear back to back because lines may match in
+ // our tests, e.g. WithStack(WithStack(io.EOF)) on the same line.
+ if detectStackBoundaries {
+ if lines[l] {
+ if len(stack) == 0 {
+ return nil, errors.New("len of block must not be zero here")
+ }
+
+ blocks = append(blocks, stack)
+ stack = l
+ lines = map[string]bool{l: true}
+ continue
+ }
+ }
+
+ stack = stack + "\n" + l
+ } else {
+ stack = l
+ }
+ lines[l] = true
+ case !isStackLine && !wasStack:
+ blocks = append(blocks, l)
+ default:
+ return nil, errors.New("must not happen")
+ }
+
+ wasStack = isStackLine
+ }
+
+ // Flush any remaining stack block.
+ if stack != "" {
+ blocks = append(blocks, stack)
+ }
+ return blocks, nil
+}
+
+func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {
+ gotStr := fmt.Sprintf(format, arg)
+
+ got, err := parseBlocks(gotStr, detectStackBoundaries)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(got) != len(want) {
+ t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q",
+ n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)
+ }
+
+ for i := range got {
+ if strings.ContainsAny(want[i], "\n") {
+ // Match as stack
+ match, err := regexp.MatchString(want[i], got[i])
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !match {
+ t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n",
+ n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))
+ }
+ } else {
+ // Match as message
+ if got[i] != want[i] {
+ t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i])
+ }
+ }
+ }
+}
+
+type wrapper struct {
+ wrap func(err error) error
+ want []string
+}
+
+func prettyBlocks(blocks []string) string {
+ var out []string
+
+ for _, b := range blocks {
+ out = append(out, fmt.Sprintf("%v", b))
+ }
+
+ return " " + strings.Join(out, "\n ")
+}
+
+func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {
+ if len(beforeWant) == 0 {
+ panic("beforeWant must not be empty")
+ }
+ for _, w := range list {
+ if len(w.want) == 0 {
+ panic("want must not be empty")
+ }
+
+ err := w.wrap(beforeErr)
+
+ // Copy required because append(beforeWant, ..) modifies beforeWant subtly.
+ beforeCopy := make([]string, len(beforeWant))
+ copy(beforeCopy, beforeWant)
+
+ beforeWant := beforeCopy
+ last := len(beforeWant) - 1
+ var want []string
+
+ // Merge two stacks behind each other.
+ if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") {
+ want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...)
+ } else {
+ want = append(beforeWant, w.want...)
+ }
+
+ testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false)
+ if maxDepth > 0 {
+ testGenericRecursive(t, err, want, list, maxDepth-1)
+ }
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000000..be0d10d0c79
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
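+//
+// For example (a minimal sketch using errors from this package):
+//
+// err := Wrap(io.EOF, "read failed")
+// Is(err, io.EOF) // true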
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
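+//
+// For example (a minimal sketch; MyError is a hypothetical error type):
+//
+// var target *MyError
+// if As(err, &target) {
+// // target now holds the *MyError found in err's chain
+// }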
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
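+//
+// For example:
+//
+// err := fmt.Errorf("read failed: %w", io.EOF)
+// Unwrap(err) == io.EOF // true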
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113_test.go
new file mode 100644
index 00000000000..4ea37e61975
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/go113_test.go
@@ -0,0 +1,178 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func TestErrorChainCompat(t *testing.T) {
+ err := stderrors.New("error that gets wrapped")
+ wrapped := Wrap(err, "wrapped up")
+ if !stderrors.Is(wrapped, err) {
+ t.Errorf("Wrap does not support Go 1.13 error chains")
+ }
+}
+
+func TestIs(t *testing.T) {
+ err := New("test")
+
+ type args struct {
+ err error
+ target error
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "with stack",
+ args: args{
+ err: WithStack(err),
+ target: err,
+ },
+ want: true,
+ },
+ {
+ name: "with message",
+ args: args{
+ err: WithMessage(err, "test"),
+ target: err,
+ },
+ want: true,
+ },
+ {
+ name: "with message format",
+ args: args{
+ err: WithMessagef(err, "%s", "test"),
+ target: err,
+ },
+ want: true,
+ },
+ {
+ name: "std errors compatibility",
+ args: args{
+ err: fmt.Errorf("wrap it: %w", err),
+ target: err,
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := Is(tt.args.err, tt.args.target); got != tt.want {
+ t.Errorf("Is() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+type customErr struct {
+ msg string
+}
+
+func (c customErr) Error() string { return c.msg }
+
+func TestAs(t *testing.T) {
+ var err = customErr{msg: "test message"}
+
+ type args struct {
+ err error
+ target interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "with stack",
+ args: args{
+ err: WithStack(err),
+ target: new(customErr),
+ },
+ want: true,
+ },
+ {
+ name: "with message",
+ args: args{
+ err: WithMessage(err, "test"),
+ target: new(customErr),
+ },
+ want: true,
+ },
+ {
+ name: "with message format",
+ args: args{
+ err: WithMessagef(err, "%s", "test"),
+ target: new(customErr),
+ },
+ want: true,
+ },
+ {
+ name: "std errors compatibility",
+ args: args{
+ err: fmt.Errorf("wrap it: %w", err),
+ target: new(customErr),
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := As(tt.args.err, tt.args.target); got != tt.want {
+ t.Errorf("As() = %v, want %v", got, tt.want)
+ }
+
+ ce := tt.args.target.(*customErr)
+ if !reflect.DeepEqual(err, *ce) {
+ t.Errorf("set target error failed, target error is %v", *ce)
+ }
+ })
+ }
+}
+
+func TestUnwrap(t *testing.T) {
+ err := New("test")
+
+ type args struct {
+ err error
+ }
+ tests := []struct {
+ name string
+ args args
+ want error
+ }{
+ {
+ name: "with stack",
+ args: args{err: WithStack(err)},
+ want: err,
+ },
+ {
+ name: "with message",
+ args: args{err: WithMessage(err, "test")},
+ want: err,
+ },
+ {
+ name: "with message format",
+ args: args{err: WithMessagef(err, "%s", "test")},
+ want: err,
+ },
+ {
+ name: "std errors compatibility",
+ args: args{err: fmt.Errorf("wrap: %w", err)},
+ want: err,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := Unwrap(tt.args.err); !reflect.DeepEqual(err, tt.want) {
+ t.Errorf("Unwrap() error = %v, want %v", err, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/json_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/json_test.go
new file mode 100644
index 00000000000..ad1adec9c81
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/json_test.go
@@ -0,0 +1,51 @@
+package errors
+
+import (
+ "encoding/json"
+ "regexp"
+ "testing"
+)
+
+func TestFrameMarshalText(t *testing.T) {
+ var tests = []struct {
+ Frame
+ want string
+ }{{
+ initpc,
+ `^github.com/pkg/errors\.init(\.ializers)? .+/github\.com/pkg/errors/stack_test.go:\d+$`,
+ }, {
+ 0,
+ `^unknown$`,
+ }}
+ for i, tt := range tests {
+ got, err := tt.Frame.MarshalText()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !regexp.MustCompile(tt.want).Match(got) {
+ t.Errorf("test %d: MarshalText:\n got %q\n want %q", i+1, string(got), tt.want)
+ }
+ }
+}
+
+func TestFrameMarshalJSON(t *testing.T) {
+ var tests = []struct {
+ Frame
+ want string
+ }{{
+ initpc,
+ `^"github\.com/pkg/errors\.init(\.ializers)? .+/github\.com/pkg/errors/stack_test.go:\d+"$`,
+ }, {
+ 0,
+ `^"unknown"$`,
+ }}
+ for i, tt := range tests {
+ got, err := json.Marshal(tt.Frame)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !regexp.MustCompile(tt.want).Match(got) {
+ t.Errorf("test %d: MarshalJSON:\n got %q\n want %q", i+1, string(got), tt.want)
+ }
+ }
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 00000000000..779a8348fb9
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons, if Frame is interpreted as a uintptr,
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (<funcname>\n\t<path>)
+// %+v equivalent to %+s:%d
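+//
+// For example, "%+v" on a Frame prints output of the form below; the
+// function name and path are illustrative:
+//
+// main.main
+// /home/user/project/main.go:24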
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
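+//
+// For example (the function name and path are illustrative):
+//
+// main.main /home/user/project/main.go:24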
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+// callers records the calling goroutine's stack, skipping runtime.Callers
+// itself, callers, and the exported constructor that invoked it, so the
+// first recorded Frame is the user's call site.
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name as
+// reported by runtime.Func.Name().
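+// For example, "github.com/pkg/errors.funcname" becomes "funcname",
+// and "main.(*R).Write" becomes "(*R).Write".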
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack_test.go
new file mode 100644
index 00000000000..aa10a72e487
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/github.com/pkg/errors/stack_test.go
@@ -0,0 +1,250 @@
+package errors
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+)
+
+var initpc = caller()
+
+type X struct{}
+
+// val returns a Frame pointing to itself.
+func (x X) val() Frame {
+ return caller()
+}
+
+// ptr returns a Frame pointing to itself.
+func (x *X) ptr() Frame {
+ return caller()
+}
+
+func TestFrameFormat(t *testing.T) {
+ var tests = []struct {
+ Frame
+ format string
+ want string
+ }{{
+ initpc,
+ "%s",
+ "stack_test.go",
+ }, {
+ initpc,
+ "%+s",
+ "github.com/pkg/errors.init\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go",
+ }, {
+ 0,
+ "%s",
+ "unknown",
+ }, {
+ 0,
+ "%+s",
+ "unknown",
+ }, {
+ initpc,
+ "%d",
+ "9",
+ }, {
+ 0,
+ "%d",
+ "0",
+ }, {
+ initpc,
+ "%n",
+ "init",
+ }, {
+ func() Frame {
+ var x X
+ return x.ptr()
+ }(),
+ "%n",
+ `\(\*X\).ptr`,
+ }, {
+ func() Frame {
+ var x X
+ return x.val()
+ }(),
+ "%n",
+ "X.val",
+ }, {
+ 0,
+ "%n",
+ "",
+ }, {
+ initpc,
+ "%v",
+ "stack_test.go:9",
+ }, {
+ initpc,
+ "%+v",
+ "github.com/pkg/errors.init\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:9",
+ }, {
+ 0,
+ "%v",
+ "unknown:0",
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.Frame, tt.format, tt.want)
+ }
+}
+
+func TestFuncname(t *testing.T) {
+ tests := []struct {
+ name, want string
+ }{
+ {"", ""},
+ {"runtime.main", "main"},
+ {"github.com/pkg/errors.funcname", "funcname"},
+ {"funcname", "funcname"},
+ {"io.copyBuffer", "copyBuffer"},
+ {"main.(*R).Write", "(*R).Write"},
+ }
+
+ for _, tt := range tests {
+ got := funcname(tt.name)
+ want := tt.want
+ if got != want {
+ t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got)
+ }
+ }
+}
+
+func TestStackTrace(t *testing.T) {
+ tests := []struct {
+ err error
+ want []string
+ }{{
+ New("ooh"), []string{
+ "github.com/pkg/errors.TestStackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:121",
+ },
+ }, {
+ Wrap(New("ooh"), "ahh"), []string{
+ "github.com/pkg/errors.TestStackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:126", // this is the stack of Wrap, not New
+ },
+ }, {
+ Cause(Wrap(New("ooh"), "ahh")), []string{
+ "github.com/pkg/errors.TestStackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:131", // this is the stack of New
+ },
+ }, {
+ func() error { return New("ooh") }(), []string{
+ `github.com/pkg/errors.TestStackTrace.func1` +
+ "\n\t.+/github.com/pkg/errors/stack_test.go:136", // this is the stack of New
+ "github.com/pkg/errors.TestStackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:136", // this is the stack of New's caller
+ },
+ }, {
+ Cause(func() error {
+ return func() error {
+ return Errorf("hello %s", fmt.Sprintf("world: %s", "ooh"))
+ }()
+ }()), []string{
+ `github.com/pkg/errors.TestStackTrace.func2.1` +
+ "\n\t.+/github.com/pkg/errors/stack_test.go:145", // this is the stack of Errorf
+ `github.com/pkg/errors.TestStackTrace.func2` +
+ "\n\t.+/github.com/pkg/errors/stack_test.go:146", // this is the stack of Errorf's caller
+ "github.com/pkg/errors.TestStackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:147", // this is the stack of Errorf's caller's caller
+ },
+ }}
+ for i, tt := range tests {
+ x, ok := tt.err.(interface {
+ StackTrace() StackTrace
+ })
+ if !ok {
+ t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err)
+ continue
+ }
+ st := x.StackTrace()
+ for j, want := range tt.want {
+ testFormatRegexp(t, i, st[j], "%+v", want)
+ }
+ }
+}
+
+func stackTrace() StackTrace {
+ const depth = 8
+ var pcs [depth]uintptr
+ n := runtime.Callers(1, pcs[:])
+ var st stack = pcs[0:n]
+ return st.StackTrace()
+}
+
+func TestStackTraceFormat(t *testing.T) {
+ tests := []struct {
+ StackTrace
+ format string
+ want string
+ }{{
+ nil,
+ "%s",
+ `\[\]`,
+ }, {
+ nil,
+ "%v",
+ `\[\]`,
+ }, {
+ nil,
+ "%+v",
+ "",
+ }, {
+ nil,
+ "%#v",
+ `\[\]errors.Frame\(nil\)`,
+ }, {
+ make(StackTrace, 0),
+ "%s",
+ `\[\]`,
+ }, {
+ make(StackTrace, 0),
+ "%v",
+ `\[\]`,
+ }, {
+ make(StackTrace, 0),
+ "%+v",
+ "",
+ }, {
+ make(StackTrace, 0),
+ "%#v",
+ `\[\]errors.Frame{}`,
+ }, {
+ stackTrace()[:2],
+ "%s",
+ `\[stack_test.go stack_test.go\]`,
+ }, {
+ stackTrace()[:2],
+ "%v",
+ `\[stack_test.go:174 stack_test.go:221\]`,
+ }, {
+ stackTrace()[:2],
+ "%+v",
+ "\n" +
+ "github.com/pkg/errors.stackTrace\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:174\n" +
+ "github.com/pkg/errors.TestStackTraceFormat\n" +
+ "\t.+/github.com/pkg/errors/stack_test.go:225",
+ }, {
+ stackTrace()[:2],
+ "%#v",
+ `\[\]errors.Frame{stack_test.go:174, stack_test.go:233}`,
+ }}
+
+ for i, tt := range tests {
+ testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)
+ }
+}
+
+// caller is a version of runtime.Caller that returns a Frame, not a uintptr.
+func caller() Frame {
+ var pcs [3]uintptr
+ n := runtime.Callers(2, pcs[:])
+ frames := runtime.CallersFrames(pcs[:n])
+ frame, _ := frames.Next()
+ return Frame(frame.PC)
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/.travis.yml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/.travis.yml
new file mode 100644
index 00000000000..055480b9ef8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 00000000000..8da58fbf6f8
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+ apic.go
+ emitterc.go
+ parserc.go
+ readerc.go
+ scannerc.go
+ writerc.go
+ yamlh.go
+ yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/NOTICE b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 00000000000..866d74a7ad7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/README.md b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 00000000000..b50c6e87755
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,133 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including anchors,
+tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
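+
+For instance, anchors, aliases, and the YAML 1.1 merge key (`<<`) can be
+decoded together. The snippet below is a minimal, hypothetical sketch (the
+document and key names are invented for illustration; it is not part of
+this package's own documentation):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v2"
+)
+
+// A hypothetical document: "defaults" is anchored with &defaults and
+// pulled into "development" via the YAML 1.1 merge key ("<<").
+var doc = `
+defaults: &defaults
+  adapter: postgres
+  host: localhost
+development:
+  <<: *defaults
+  database: dev_db
+`
+
+func main() {
+	m := map[string]map[string]string{}
+	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	// Merged keys appear alongside the mapping's own keys.
+	fmt.Println(m["development"]["adapter"])  // postgres
+	fmt.Println(m["development"]["database"]) // dev_db
+}
+```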
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/apic.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 00000000000..d2c2308f1f4
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,740 @@
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
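+	// A negative pos means "append at the tail"; otherwise, shift the
+	// queued tokens right by one and place the new token at the requested
+	// position relative to the queue head.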
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a reader input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a writer output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 00000000000..129bc2a97d3
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,815 @@
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ // For an alias node, alias holds the resolved alias.
+ alias *node
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line number before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("attempted to parse unknown event: " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ n.children = append(n.children, p.parse())
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ n.alias = p.doc.anchors[n.value]
+ if n.alias == nil {
+ failf("unknown anchor '%s' referenced", n.value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[*node]bool
+ mapType reflect.Type
+ terrors []string
+ strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ mapItemType = reflect.TypeOf(MapItem{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = defaultMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+ d := &decoder{mapType: defaultMapType, strict: strict}
+ d.aliases = make(map[*node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+ if n.tag != "" {
+ tag = n.tag
+ }
+ value := n.value
+ if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
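+		// Illustrative arithmetic: at decodeCount = 2,200,000, halfway through
+		// the range, this yields 0.99 - 0.89*(1,800,000/3,600,000) = 0.545,
+		// i.e. roughly 54% of decodes may come from alias expansion.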
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
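+	// Reject documents whose aliases expand into far more decode work than
+	// their raw size suggests (the classic "billion laughs" pattern).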
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ switch n.kind {
+ case documentNode:
+ return d.document(n, out)
+ case aliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.kind {
+ case scalarNode:
+ good = d.scalar(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = yaml_STR_TAG
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ if tag == yaml_BINARY_TAG {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.Kind() == reflect.Map && !out.CanAddr() {
+ resetMap(out)
+ } else {
+ out.Set(reflect.Zero(out.Type()))
+ }
+ return true
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == yaml_BINARY_TAG {
+ text = []byte(resolved.(string))
+ } else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should reject any dubious values.
+ text = []byte(n.value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == yaml_BINARY_TAG {
+ out.SetString(resolved.(string))
+ return true
+ }
+ if resolved != nil {
+ out.SetString(n.value)
+ return true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else if tag == yaml_TIMESTAMP_TAG {
+ // It looks like a timestamp but for backward compatibility
+ // reasons we set it as a string, so that code that unmarshals
+ // timestamp-like values into interface{} will continue to
+ // see a string and not a time.Time.
+ // TODO(v3) Drop this.
+ out.Set(reflect.ValueOf(n.value))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ return true
+ }
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ l := len(n.children)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, yaml_SEQ_TAG, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Slice:
+ return d.mappingSlice(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ if d.mapType.Kind() == reflect.Map {
+ iface := out
+ out = reflect.MakeMap(d.mapType)
+ iface.Set(out)
+ } else {
+ slicev := reflect.New(d.mapType).Elem()
+ if !d.mappingSlice(n, slicev) {
+ return false
+ }
+ out.Set(slicev)
+ return true
+ }
+ default:
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ mapType := d.mapType
+ if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+ d.mapType = outt
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ d.setMapIndex(n.children[i+1], out, k, e)
+ }
+ }
+ }
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+ if d.strict && out.MapIndex(k) != zeroValue {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+ return
+ }
+ out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+ outt := out.Type()
+ if outt.Elem() != mapItemType {
+ d.terror(n, yaml_MAP_TAG, out)
+ return false
+ }
+
+ mapType := d.mapType
+ d.mapType = outt
+
+ var slice []MapItem
+ var l = len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ item := MapItem{}
+ k := reflect.ValueOf(&item.Key).Elem()
+ if d.unmarshal(n.children[i], k) {
+ v := reflect.ValueOf(&item.Value).Elem()
+ if d.unmarshal(n.children[i+1], v) {
+ slice = append(slice, item)
+ }
+ }
+ }
+ out.Set(reflect.ValueOf(slice))
+ d.mapType = mapType
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ var doneFields []bool
+ if d.strict {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.strict {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.children[i+1], value)
+ d.setMapIndex(n.children[i+1], inlineMap, name, value)
+ } else if d.strict {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ if n.alias != nil && n.alias.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
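+		// E.g. for "<<: [*a, *b]" the mapping *b is applied first and *a
+		// second, so keys from *a take precedence over those from *b.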
+ for i := len(n.children) - 1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ if ni.alias != nil && ni.alias.kind != mappingNode {
+ failWantMap()
+ }
+ } else if ni.kind != mappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode_test.go
new file mode 100644
index 00000000000..c7d104e9082
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/decode_test.go
@@ -0,0 +1,1367 @@
+package yaml_test
+
+import (
+ "errors"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ (*struct{})(nil),
+ },
+ {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "bin: -0b1000000000000000000000000000000000000000000000000000000000000000",
+ map[string]interface{}{"bin": -9223372036854775808},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: [1, 2]",
+ &struct{ A [2]int }{[2]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // int
+ {
+ "int_max: 2147483647",
+ map[string]int{"int_max": math.MaxInt32},
+ },
+ {
+ "int_min: -2147483648",
+ map[string]int{"int_min": math.MinInt32},
+ },
+ {
+ "int_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int{},
+ },
+
+ // int64
+ {
+ "int64_max: 9223372036854775807",
+ map[string]int64{"int64_max": math.MaxInt64},
+ },
+ {
+ "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_max_base2": math.MaxInt64},
+ },
+ {
+ "int64_min: -9223372036854775808",
+ map[string]int64{"int64_min": math.MinInt64},
+ },
+ {
+ "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
+ map[string]int64{"int64_neg_base2": -math.MaxInt64},
+ },
+ {
+ "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
+ map[string]int64{},
+ },
+
+ // uint
+ {
+ "uint_min: 0",
+ map[string]uint{"uint_min": 0},
+ },
+ {
+ "uint_max: 4294967295",
+ map[string]uint{"uint_max": math.MaxUint32},
+ },
+ {
+ "uint_underflow: -1",
+ map[string]uint{},
+ },
+
+ // uint64
+ {
+ "uint64_min: 0",
+ map[string]uint{"uint64_min": 0},
+ },
+ {
+ "uint64_max: 18446744073709551615",
+ map[string]uint64{"uint64_max": math.MaxUint64},
+ },
+ {
+ "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
+ map[string]uint64{"uint64_max_base2": math.MaxUint64},
+ },
+ {
+ "uint64_maxint64: 9223372036854775807",
+ map[string]uint64{"uint64_maxint64": math.MaxInt64},
+ },
+ {
+ "uint64_underflow: -1",
+ map[string]uint64{},
+ },
+
+ // float32
+ {
+ "float32_max: 3.40282346638528859811704183484516925440e+38",
+ map[string]float32{"float32_max": math.MaxFloat32},
+ },
+ {
+ "float32_nonzero: 1.401298464324817070923729583289916131280e-45",
+ map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
+ },
+ {
+ "float32_maxuint64: 18446744073709551615",
+ map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
+ },
+ {
+ "float32_maxuint64+1: 18446744073709551616",
+ map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
+ },
+
+ // float64
+ {
+ "float64_max: 1.797693134862315708145274237317043567981e+308",
+ map[string]float64{"float64_max": math.MaxFloat64},
+ },
+ {
+ "float64_nonzero: 4.940656458412465441765687928682213723651e-324",
+ map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
+ },
+ {
+ "float64_maxuint64: 18446744073709551615",
+ map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
+ },
+ {
+ "float64_maxuint64+1: 18446744073709551616",
+ map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!float 0",
+ map[string]interface{}{"v": float64(0)},
+ }, {
+ "v: !!float -1",
+ map[string]interface{}{"v": float64(-1)},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Non-specific tag (Issue #75)
+ {
+ "v: ! test",
+ map[string]interface{}{"v": "test"},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]*string{"foo": nil},
+ }, {
+ "foo: null",
+ map[string]string{"foo": ""},
+ }, {
+ "foo: null",
+ map[string]interface{}{"foo": nil},
+ },
+
+ // Support for ~
+ {
+ "foo: ~",
+ map[string]*string{"foo": nil},
+ }, {
+ "foo: ~",
+ map[string]string{"foo": ""},
+ }, {
+ "foo: ~",
+ map[string]interface{}{"foo": nil},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+	// Struct inlining (spelled out in TestUnmarshalInlineSketch below)
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // Map inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C map[string]int `yaml:",inline"`
+ }{1, map[string]int{"b": 2, "c": 3}},
+ },
+
+ // bug 1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // issue #295 (allow scalars with colons in flow mappings and sequences)
+ {
+ "a: {b: https://github.com/go-yaml/yaml}",
+ map[string]interface{}{"a": map[interface{}]interface{}{
+ "b": "https://github.com/go-yaml/yaml",
+ }},
+ },
+ {
+ "a: [https://github.com/go-yaml/yaml]",
+ map[string]interface{}{"a": []interface{}{"https://github.com/go-yaml/yaml"}},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+
+ // Issue #24.
+ {
+ "a: <foo>",
+ map[string]string{"a": "<foo>"},
+ },
+
+ // Base 60 floats are obsolete and unsupported.
+ {
+ "a: 1:1\n",
+ map[string]string{"a": "1:1"},
+ },
+
+ // Binary data.
+ {
+ "a: !!binary gIGC\n",
+ map[string]string{"a": "\x80\x81\x82"},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ }, {
+ "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
+ map[string]string{"a": strings.Repeat("\x00", 52)},
+ },
+
+ // Ordered maps.
+ {
+ "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ },
+
+ // Issue #39.
+ {
+ "a:\n b:\n c: d\n",
+ map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
+ },
+
+ // Custom map type.
+ {
+ "a: {b: c}",
+ M{"a": M{"b": "c"}},
+ },
+
+ // Support encoding.TextUnmarshaler.
+ {
+ "a: 1.2.3.4\n",
+ map[string]textUnmarshaler{"a": textUnmarshaler{S: "1.2.3.4"}},
+ },
+ {
+ "a: 2015-02-24T18:19:39Z\n",
+ map[string]textUnmarshaler{"a": textUnmarshaler{"2015-02-24T18:19:39Z"}},
+ },
+
+ // Timestamps
+ {
+ // Date only.
+ "a: 2015-01-01\n",
+ map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)},
+ },
+ {
+ // RFC3339
+ "a: 2015-02-24T18:19:39.12Z\n",
+ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, .12e9, time.UTC)},
+ },
+ {
+ // RFC3339 with short dates.
+ "a: 2015-2-3T3:4:5Z",
+ map[string]time.Time{"a": time.Date(2015, 2, 3, 3, 4, 5, 0, time.UTC)},
+ },
+ {
+ // ISO8601 lower case t
+ "a: 2015-02-24t18:19:39Z\n",
+ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)},
+ },
+ {
+ // space separate, no time zone
+ "a: 2015-02-24 18:19:39\n",
+ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)},
+ },
+ // Some cases not currently handled. Uncomment these when
+ // the code is fixed.
+ // {
+ // // space separated with time zone
+ // "a: 2001-12-14 21:59:43.10 -5",
+ // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)},
+ // },
+ // {
+ // // arbitrary whitespace between fields
+ // "a: 2001-12-14 \t\t \t21:59:43.10 \t Z",
+ // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)},
+ // },
+ {
+ // explicit string tag
+ "a: !!str 2015-01-01",
+ map[string]interface{}{"a": "2015-01-01"},
+ },
+ {
+ // explicit timestamp tag on quoted string
+ "a: !!timestamp \"2015-01-01\"",
+ map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)},
+ },
+ {
+ // explicit timestamp tag on unquoted string
+ "a: !!timestamp 2015-01-01",
+ map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)},
+ },
+ {
+ // quoted string that's a valid timestamp
+ "a: \"2015-01-01\"",
+ map[string]interface{}{"a": "2015-01-01"},
+ },
+ {
+ // explicit timestamp tag into interface.
+ "a: !!timestamp \"2015-01-01\"",
+ map[string]interface{}{"a": "2015-01-01"},
+ },
+ {
+ // implicit timestamp tag into interface.
+ "a: 2015-01-01",
+ map[string]interface{}{"a": "2015-01-01"},
+ },
+
+ // Encode empty lists as zero-length slices.
+ {
+ "a: []",
+ &struct{ A []int }{[]int{}},
+ },
+
+ // UTF-16-LE
+ {
+ "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00",
+ M{"ñoño": "very yes"},
+ },
+ // UTF-16-LE with surrogate.
+ {
+ "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00",
+ M{"ñoño": "very yes 🟔"},
+ },
+
+ // UTF-16-BE
+ {
+ "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n",
+ M{"ñoño": "very yes"},
+ },
+ // UTF-16-BE with surrogate.
+ {
+ "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n",
+ M{"ñoño": "very yes 🟔"},
+ },
+
+ // This *is* in fact a float number, per the spec. #171 was a mistake.
+ {
+ "a: 123456e1\n",
+ M{"a": 123456e1},
+ }, {
+ "a: 123456E1\n",
+ M{"a": 123456E1},
+ },
+ // yaml-test-suite 3GZX: Spec Example 7.1. Alias Nodes
+ {
+ "First occurrence: &anchor Foo\nSecond occurrence: *anchor\nOverride anchor: &anchor Bar\nReuse anchor: *anchor\n",
+ map[interface{}]interface{}{
+ "Reuse anchor": "Bar",
+ "First occurrence": "Foo",
+ "Second occurrence": "Foo",
+ "Override anchor": "Bar",
+ },
+ },
+ // Single document with garbage following it.
+ {
+ "---\nhello\n...\n}not yaml",
+ "hello",
+ },
+ {
+ "a: 5\n",
+ &struct{ A jsonNumberT }{"5"},
+ },
+ {
+ "a: 5.5\n",
+ &struct{ A jsonNumberT }{"5.5"},
+ },
+ {
+ `
+a:
+ b
+b:
+ ? a
+ : a`,
+ &M{"a": "b",
+ "b": M{
+ "a": "a",
+ }},
+ },
+}
+
+type M map[interface{}]interface{}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
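+
+// TestUnmarshalInlineSketch is an illustrative sketch added alongside the
+// vendored suite (not part of upstream go-yaml): it spells out what the
+// "Struct inlining" entries in unmarshalTests rely on, namely that the
+// ",inline" tag promotes inlineB's fields (and, transitively, inlineC's)
+// into the parent mapping, so the YAML keys "b" and "c" decode straight
+// into B and C.
+func (s *S) TestUnmarshalInlineSketch(c *C) {
+	var v struct {
+		A       int
+		inlineB `yaml:",inline"`
+	}
+	c.Assert(yaml.Unmarshal([]byte("a: 1\nb: 2\nc: 3"), &v), IsNil)
+	c.Assert(v.A, Equals, 1)
+	c.Assert(v.B, Equals, 2)
+	c.Assert(v.C, Equals, 3)
+}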
+
+func (s *S) TestUnmarshal(c *C) {
+ for i, item := range unmarshalTests {
+ c.Logf("test %d: %q", i, item.data)
+ t := reflect.ValueOf(item.value).Type()
+ value := reflect.New(t)
+ err := yaml.Unmarshal([]byte(item.data), value.Interface())
+ if _, ok := err.(*yaml.TypeError); !ok {
+ c.Assert(err, IsNil)
+ }
+ c.Assert(value.Elem().Interface(), DeepEquals, item.value, Commentf("error: %v", err))
+ }
+}
+
+// TODO(v3): This test should also work when unmarshaling onto an interface{}.
+func (s *S) TestUnmarshalFullTimestamp(c *C) {
+ // Full timestamp in same format as encoded. This is confirmed to be
+ // properly decoded by Python as a timestamp as well.
+ var str = "2015-02-24T18:19:39.123456789-03:00"
+ var t time.Time
+ err := yaml.Unmarshal([]byte(str), &t)
+ c.Assert(err, IsNil)
+ c.Assert(t, Equals, time.Date(2015, 2, 24, 18, 19, 39, 123456789, t.Location()))
+ c.Assert(t.In(time.UTC), Equals, time.Date(2015, 2, 24, 21, 19, 39, 123456789, time.UTC))
+}
+
+func (s *S) TestDecoderSingleDocument(c *C) {
+ // Test that Decoder.Decode works as expected on
+ // all the unmarshal tests.
+ for i, item := range unmarshalTests {
+ c.Logf("test %d: %q", i, item.data)
+ if item.data == "" {
+ // Behaviour differs when there's no YAML.
+ continue
+ }
+ t := reflect.ValueOf(item.value).Type()
+ value := reflect.New(t)
+ err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(value.Interface())
+ if _, ok := err.(*yaml.TypeError); !ok {
+ c.Assert(err, IsNil)
+ }
+ c.Assert(value.Elem().Interface(), DeepEquals, item.value)
+ }
+}
+
+var decoderTests = []struct {
+ data string
+ values []interface{}
+}{{
+ "",
+ nil,
+}, {
+ "a: b",
+ []interface{}{
+ map[interface{}]interface{}{"a": "b"},
+ },
+}, {
+ "---\na: b\n...\n",
+ []interface{}{
+ map[interface{}]interface{}{"a": "b"},
+ },
+}, {
+ "---\n'hello'\n...\n---\ngoodbye\n...\n",
+ []interface{}{
+ "hello",
+ "goodbye",
+ },
+}}
+
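+// Decode yields one value per document in the stream and returns io.EOF once
+// the input is exhausted, which is how the loop in TestDecoder below knows
+// when to stop.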
+func (s *S) TestDecoder(c *C) {
+ for i, item := range decoderTests {
+ c.Logf("test %d: %q", i, item.data)
+ var values []interface{}
+ dec := yaml.NewDecoder(strings.NewReader(item.data))
+ for {
+ var value interface{}
+ err := dec.Decode(&value)
+ if err == io.EOF {
+ break
+ }
+ c.Assert(err, IsNil)
+ values = append(values, value)
+ }
+ c.Assert(values, DeepEquals, item.values)
+ }
+}
+
+type errReader struct{}
+
+func (errReader) Read([]byte) (int, error) {
+ return 0, errors.New("some read error")
+}
+
+func (s *S) TestDecoderReadError(c *C) {
+ err := yaml.NewDecoder(errReader{}).Decode(&struct{}{})
+ c.Assert(err, ErrorMatches, `yaml: input error: some read error`)
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
+ {"v: [A,", "yaml: line 1: did not find expected node content"},
+ {"v:\n- [A,", "yaml: line 2: did not find expected node content"},
+ {"a:\n- b: *,", "yaml: line 2: did not find expected alphabetic or numeric character"},
+ {"a: *b\n", "yaml: unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
+ {"a: &x null\n<<:\n- *x\nb: &x {}\n", `yaml: map merge requires map or sequence of maps as the value`}, // Issue #529.
+ {"value: -", "yaml: block sequence entries are not allowed in this context"},
+ {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
+ {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
+ {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
+ {"b: *a\na: &a {c: 1}", `yaml: unknown anchor 'a' referenced`},
+ {"%TAG !%79! tag:yaml.org,2002:\n---\nv: !%79!int '1'", "yaml: did not find expected whitespace"},
+ {"a:\n 1:\nb\n 2:", ".*could not find expected ':'"},
+ {
+ "a: &a [00,00,00,00,00,00,00,00,00]\n" +
+ "b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a]\n" +
+ "c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b]\n" +
+ "d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c]\n" +
+ "e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d]\n" +
+ "f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e]\n" +
+ "g: &g [*f,*f,*f,*f,*f,*f,*f,*f,*f]\n" +
+ "h: &h [*g,*g,*g,*g,*g,*g,*g,*g,*g]\n" +
+ "i: &i [*h,*h,*h,*h,*h,*h,*h,*h,*h]\n",
+ "yaml: document contains excessive aliasing",
+ },
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for i, item := range unmarshalErrorTests {
+ c.Logf("test %d: %q", i, item.data)
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+
+ if strings.Contains(item.data, ":") {
+ // Repeat test with typed value.
+ var value map[string]interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+ }
+}
+
+func (s *S) TestDecoderErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(&value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var unmarshalerTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+ {`_: ""`, "!!str", ""},
+}
+
+var unmarshalerResult = map[int]error{}
+
+type unmarshalerType struct {
+ value interface{}
+}
+
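+// UnmarshalYAML implements yaml.v2's callback style of custom decoding: the
+// decoder hands the method an unmarshal function that can decode the current
+// node into any Go value. This implementation decodes into an interface{}
+// and, when the result is an int with an entry in unmarshalerResult, returns
+// that canned error so tests can inject failures.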
+func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
+ if err := unmarshal(&o.value); err != nil {
+ return err
+ }
+ if i, ok := o.value.(int); ok {
+ if result, ok := unmarshalerResult[i]; ok {
+ return result
+ }
+ }
+ return nil
+}
+
+type unmarshalerPointer struct {
+ Field *unmarshalerType "_"
+}
+
+type unmarshalerValue struct {
+ Field unmarshalerType "_"
+}
+
+func (s *S) TestUnmarshalerPointerField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerPointer{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ if item.value == nil {
+ c.Assert(obj.Field, IsNil)
+ } else {
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalerValueField(c *C) {
+ for _, item := range unmarshalerTests {
+ obj := &unmarshalerValue{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalerWholeDocument(c *C) {
+ obj := &unmarshalerType{}
+ err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
+ c.Assert(err, IsNil)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
+ c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
+}
+
+func (s *S) TestUnmarshalerTypeError(c *C) {
+ unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
+ unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
+ defer func() {
+ delete(unmarshalerResult, 2)
+ delete(unmarshalerResult, 4)
+ }()
+
+ type T struct {
+ Before int
+ After int
+ M map[string]*unmarshalerType
+ }
+ var v T
+ data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " foo\n"+
+ " bar\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+ c.Assert(v.M["abc"], NotNil)
+ c.Assert(v.M["def"], IsNil)
+ c.Assert(v.M["ghi"], NotNil)
+ c.Assert(v.M["jkl"], IsNil)
+
+ c.Assert(v.M["abc"].value, Equals, 1)
+ c.Assert(v.M["ghi"].value, Equals, 3)
+}
+
+type proxyTypeError struct{}
+
+func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ var a int32
+ var b int64
+ if err := unmarshal(&s); err != nil {
+ panic(err)
+ }
+ if s == "a" {
+ if err := unmarshal(&b); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&a)
+ }
+ if err := unmarshal(&a); err == nil {
+ panic("should have failed")
+ }
+ return unmarshal(&b)
+}
+
+func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
+ type T struct {
+ Before int
+ After int
+ M map[string]*proxyTypeError
+ }
+ var v T
+ data := `{before: A, m: {abc: a, def: b}, after: B}`
+ err := yaml.Unmarshal([]byte(data), &v)
+ c.Assert(err, ErrorMatches, ""+
+ "yaml: unmarshal errors:\n"+
+ " line 1: cannot unmarshal !!str `A` into int\n"+
+ " line 1: cannot unmarshal !!str `a` into int32\n"+
+ " line 1: cannot unmarshal !!str `b` into int64\n"+
+ " line 1: cannot unmarshal !!str `B` into int")
+}
+
+type failingUnmarshaler struct{}
+
+var failingErr = errors.New("failingErr")
+
+func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ return failingErr
+}
+
+func (s *S) TestUnmarshalerError(c *C) {
+ err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+type sliceUnmarshaler []int
+
+func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var slice []int
+ err := unmarshal(&slice)
+ if err == nil {
+ *su = slice
+ return nil
+ }
+
+ var intVal int
+ err = unmarshal(&intVal)
+ if err == nil {
+ *su = []int{intVal}
+ return nil
+ }
+
+ return err
+}
+
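+// The retry pattern above works because the unmarshal callback may be called
+// any number of times with different target types until one succeeds:
+// sliceUnmarshaler first tries []int and only then falls back to a single int.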
+func (s *S) TestUnmarshalerRetry(c *C) {
+ var su sliceUnmarshaler
+ err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
+
+ err = yaml.Unmarshal([]byte("1"), &su)
+ c.Assert(err, IsNil)
+ c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ list:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+  # Explicit long merge tag
+ !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
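+// Every top-level map above except "anchors" must decode to the same four
+// keys: in a << merge, maps earlier in the sequence take precedence over
+// later ones, and keys written explicitly in the map itself override
+// anything merged in.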
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[interface{}]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+var unmarshalNullTests = []func() interface{}{
+ func() interface{} { var v interface{}; v = "v"; return &v },
+ func() interface{} { var s = "s"; return &s },
+ func() interface{} { var s = "s"; sptr := &s; return &sptr },
+ func() interface{} { var i = 1; return &i },
+ func() interface{} { var i = 1; iptr := &i; return &iptr },
+ func() interface{} { m := map[string]int{"s": 1}; return &m },
+ func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+ for _, test := range unmarshalNullTests {
+ item := test()
+ zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+ err := yaml.Unmarshal([]byte("null"), item)
+ c.Assert(err, IsNil)
+ if reflect.TypeOf(item).Kind() == reflect.Map {
+ c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+ } else {
+ c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+ }
+ }
+}
+
+func (s *S) TestUnmarshalSliceOnPreset(c *C) {
+ // Issue #48.
+ v := struct{ A []int }{[]int{1}}
+ yaml.Unmarshal([]byte("a: [2]"), &v)
+ c.Assert(v.A, DeepEquals, []int{2})
+}
+
+var unmarshalStrictTests = []struct {
+ data string
+ value interface{}
+ error string
+}{{
+ data: "a: 1\nc: 2\n",
+ value: struct{ A, B int }{A: 1},
+ error: `yaml: unmarshal errors:\n line 2: field c not found in type struct { A int; B int }`,
+}, {
+ data: "a: 1\nb: 2\na: 3\n",
+ value: struct{ A, B int }{A: 3, B: 2},
+ error: `yaml: unmarshal errors:\n line 3: field a already set in type struct { A int; B int }`,
+}, {
+ data: "c: 3\na: 1\nb: 2\nc: 4\n",
+ value: struct {
+ A int
+ inlineB `yaml:",inline"`
+ }{
+ A: 1,
+ inlineB: inlineB{
+ B: 2,
+ inlineC: inlineC{
+ C: 4,
+ },
+ },
+ },
+ error: `yaml: unmarshal errors:\n line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`,
+}, {
+ data: "c: 0\na: 1\nb: 2\nc: 1\n",
+ value: struct {
+ A int
+ inlineB `yaml:",inline"`
+ }{
+ A: 1,
+ inlineB: inlineB{
+ B: 2,
+ inlineC: inlineC{
+ C: 1,
+ },
+ },
+ },
+ error: `yaml: unmarshal errors:\n line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`,
+}, {
+ data: "c: 1\na: 1\nb: 2\nc: 3\n",
+ value: struct {
+ A int
+ M map[string]interface{} `yaml:",inline"`
+ }{
+ A: 1,
+ M: map[string]interface{}{
+ "b": 2,
+ "c": 3,
+ },
+ },
+ error: `yaml: unmarshal errors:\n line 4: key "c" already set in map`,
+}, {
+ data: "a: 1\n9: 2\nnull: 3\n9: 4",
+ value: map[interface{}]interface{}{
+ "a": 1,
+ nil: 3,
+ 9: 4,
+ },
+ error: `yaml: unmarshal errors:\n line 4: key 9 already set in map`,
+}}
+
+func (s *S) TestUnmarshalStrict(c *C) {
+ for i, item := range unmarshalStrictTests {
+ c.Logf("test %d: %q", i, item.data)
+ // First test that normal Unmarshal unmarshals to the expected value.
+ t := reflect.ValueOf(item.value).Type()
+ value := reflect.New(t)
+ err := yaml.Unmarshal([]byte(item.data), value.Interface())
+ c.Assert(err, Equals, nil)
+ c.Assert(value.Elem().Interface(), DeepEquals, item.value)
+
+ // Then test that UnmarshalStrict fails on the same thing.
+ t = reflect.ValueOf(item.value).Type()
+ value = reflect.New(t)
+ err = yaml.UnmarshalStrict([]byte(item.data), value.Interface())
+ c.Assert(err, ErrorMatches, item.error)
+ }
+}
+
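+// textUnmarshaler exercises the decoder's support for encoding.TextUnmarshaler:
+// a type implementing UnmarshalText receives the scalar's raw text, as the
+// textUnmarshaler entries in unmarshalTests above demonstrate.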
+type textUnmarshaler struct {
+ S string
+}
+
+func (t *textUnmarshaler) UnmarshalText(s []byte) error {
+ t.S = string(s)
+ return nil
+}
+
+func (s *S) TestFuzzCrashers(c *C) {
+ cases := []string{
+ // runtime error: index out of range
+ "\"\\0\\\r\n",
+
+ // should not happen
+ " 0: [\n] 0",
+ "? ? \"\n\" 0",
+ " - {\n000}0",
+ "0:\n 0: [0\n] 0",
+ " - \"\n000\"0",
+ " - \"\n000\"\"",
+ "0:\n - {\n000}0",
+ "0:\n - \"\n000\"0",
+ "0:\n - \"\n000\"\"",
+
+ // runtime error: index out of range
+ " \ufeff\n",
+ "? \ufeff\n",
+ "? \ufeff:\n",
+ "0: \ufeff\n",
+ "? \ufeff: \ufeff\n",
+ }
+ for _, data := range cases {
+ var v interface{}
+ _ = yaml.Unmarshal([]byte(data), &v)
+ }
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/emitterc.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000000..a1c2cc52627
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into the buffer.
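+// The fallthrough chain below copies every byte of a multi-byte UTF-8
+// sequence in one call: width() derives the sequence length from the leading
+// byte, and both the buffer position and *i advance by that width while the
+// column count increases by one.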
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
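+// Incoming events are appended to a queue and only replayed through the
+// state machine once yaml_emitter_need_more_events reports that enough
+// lookahead is buffered to commit to an output style.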
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
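+// For example, an empty flow sequence arrives as SEQUENCE-START followed
+// immediately by SEQUENCE-END; holding both events lets
+// yaml_emitter_check_empty_sequence spot the pair and emit "[]" directly.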
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		// space separated, no time zone
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
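+	// The 128-byte cap appears to mirror the C libyaml limit for simple keys;
+	// longer keys are emitted in the explicit '?'/':' form instead. (The YAML
+	// spec itself allows simple keys of up to 1024 characters.)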
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
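+// The requested style is treated as a hint: plain degrades to single-quoted
+// when indicators, surrounding whitespace, or an empty value rule it out,
+// and single-quoted, literal, and folded styles all degrade to double-quoted,
+// which can represent any scalar.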
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
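+// A single pass over the value records indicator characters, leading and
+// trailing spaces and breaks, and special (non-printable or non-ASCII)
+// characters, then derives which of the plain, single-quoted, and block
+// styles may legally represent the scalar.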
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
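+
+// For illustration: bytes outside the URI-safe set above are percent-encoded
+// per octet with uppercase hex digits, so a tag suffix such as "hello world"
+// is written as "hello%20world".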
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
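+
+// For illustration: a plain scalar is folded at a single space once the
+// column exceeds best_width; runs of spaces are never folded, as the break
+// would not round-trip. (In this port the default width is effectively
+// unbounded; see the "strings do not wrap" case in encode_test.go.)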
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
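+
+// For illustration: the only escape in single-quoted style is the doubled
+// quote, so the value "it's" is written as 'it''s'.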
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
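+
+// For illustration: a NUL byte is emitted as \0, U+0085 as \N, and any other
+// unrepresentable code point falls back to \xXX, \uXXXX or \UXXXXXXXX
+// depending on its magnitude.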
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
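+
+// For illustration: "abc" gets the strip hint '-' (no trailing break),
+// "abc\n" gets no chomping hint (clip), and "abc\n\n" gets the keep hint
+// '+'. A leading space or break additionally forces an explicit indent
+// hint digit such as '2'.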
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 00000000000..0ee738e11b6
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and String(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+ Float64() (float64, error)
+ Int64() (int64, error)
+ String() string
+}
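+
+// For illustration: encoding/json.Number satisfies jsonNumber, so a value
+// holding "3" is emitted as the integer 3, one holding "0.5" as the float
+// 0.5, and a non-numeric value falls back to its String() form (see
+// marshal below).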
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ // doneInit holds whether the initial stream_start_event has been
+ // emitted.
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch m := iface.(type) {
+ case jsonNumber:
+ integer, err := m.Int64()
+ if err == nil {
+ // In this case the json.Number is a valid int64
+ in = reflect.ValueOf(integer)
+ break
+ }
+ float, err := m.Float64()
+ if err == nil {
+ // In this case the json.Number is a valid float64
+ in = reflect.ValueOf(float)
+ break
+ }
+		// Fallback case: no number could be obtained.
+ in = reflect.ValueOf(m.String())
+ case time.Time, *time.Time:
+ // Although time.Time implements TextMarshaler,
+ // we don't want to treat it as a string for YAML
+ // purposes because YAML has special support for
+ // timestamps.
+ case Marshaler:
+ v, err := m.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(v)
+ case encoding.TextMarshaler:
+ text, err := m.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.Type() == ptrTimeType {
+ e.timev(tag, in.Elem())
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ if in.Type() == timeType {
+ e.timev(tag, in)
+ } else {
+ e.structv(tag, in)
+ }
+ case reflect.Slice, reflect.Array:
+ if in.Type().Elem() == mapItemType {
+ e.itemsv(tag, in)
+ } else {
+ e.slicev(tag, in)
+ }
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
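+
+// For illustration: the kind switch above routes a *time.Time through timev
+// via the ptrTimeType check, while a time.Duration (kind int64) is emitted
+// as its String() form, e.g. "3s", through the durationType branch.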
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+ for _, item := range slice {
+ e.marshal("", reflect.ValueOf(item.Key))
+ e.marshal("", reflect.ValueOf(item.Value))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should be marshalled quoted
+// for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
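+
+// For illustration: isBase60Float matches "1:30" and "190:20:30.15", which
+// are therefore marshalled quoted, while "1.5" is rejected by the fast path
+// since it contains no ':'.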
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == yaml_BINARY_TAG {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+		// It can't be encoded directly as YAML, so use a binary tag
+ // and encode it as base64.
+ tag = yaml_BINARY_TAG
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
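+
+// For illustration: stringv emits "line1\nline2" in literal style (|-),
+// "true" double-quoted (unquoted it would resolve to a boolean), and
+// "hello" as a plain scalar.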
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
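+
+// For illustration, assuming the exported API in yaml.go (defined elsewhere
+// in this vendor drop): yaml.Marshal drives this encoder, e.g.
+//
+//	data, err := yaml.Marshal(map[string]int{"a": 1})
+//	// data == []byte("a: 1\n"), err == nil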
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode_test.go
new file mode 100644
index 00000000000..f7c91ff0020
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/encode_test.go
@@ -0,0 +1,630 @@
+package yaml_test
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "net"
+ "os"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+)
+
+type jsonNumberT string
+
+func (j jsonNumberT) Int64() (int64, error) {
+ val, err := strconv.Atoi(string(j))
+ if err != nil {
+ return 0, err
+ }
+ return int64(val), nil
+}
+
+func (j jsonNumberT) Float64() (float64, error) {
+ return strconv.ParseFloat(string(j), 64)
+}
+
+func (j jsonNumberT) String() string {
+ return string(j)
+}
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ nil,
+ "null\n",
+ }, {
+ (*marshalerType)(nil),
+ "null\n",
+ }, {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float32(0.99)},
+ "v: 0.99\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- |-\n B\n C\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct{ A [2]int }{[2]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{&struct{ X, y int }{1, 2}},
+ "a: {x: 1}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{nil},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X, y int } "a,omitempty,flow"
+ }{&struct{ X, y int }{}},
+ "a: {x: 0}\n",
+ }, {
+ &struct {
+ A struct{ X, y int } "a,omitempty,flow"
+ }{struct{ X, y int }{1, 2}},
+ "a: {x: 1}\n",
+ }, {
+ &struct {
+ A struct{ X, y int } "a,omitempty,flow"
+ }{struct{ X, y int }{0, 1}},
+ "{}\n",
+ }, {
+ &struct {
+ A float64 "a,omitempty"
+ B float64 "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ },
+ {
+ &struct {
+ T1 time.Time "t1,omitempty"
+ T2 time.Time "t2,omitempty"
+ T3 *time.Time "t3,omitempty"
+ T4 *time.Time "t4,omitempty"
+ }{
+ T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC),
+ T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)),
+ },
+ "t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n",
+ },
+ // Nil interface that implements Marshaler.
+ {
+ map[string]yaml.Marshaler{
+ "a": nil,
+ },
+ "a: null\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Map inlining
+ {
+ &struct {
+ A int
+ C map[string]int `yaml:",inline"`
+ }{1, map[string]int{"b": 2, "c": 3}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+
+ // Issue #24: bug in map merging logic.
+ {
+ map[string]string{"a": "<foo>"},
+ "a: <foo>\n",
+ },
+
+ // Issue #34: marshal unsupported base 60 floats quoted for compatibility
+ // with old YAML 1.1 parsers.
+ {
+ map[string]string{"a": "1:1"},
+ "a: \"1:1\"\n",
+ },
+
+ // Binary data.
+ {
+ map[string]string{"a": "\x00"},
+ "a: \"\\0\"\n",
+ }, {
+ map[string]string{"a": "\x80\x81\x82"},
+ "a: !!binary gIGC\n",
+ }, {
+ map[string]string{"a": strings.Repeat("\x90", 54)},
+ "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
+ },
+
+ // Ordered maps.
+ {
+ &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
+ "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
+ },
+
+ // Encode unicode as utf-8 rather than in escaped form.
+ {
+ map[string]string{"a": "你好"},
+ "a: 你好\n",
+ },
+
+ // Support encoding.TextMarshaler.
+ {
+ map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
+ "a: 1.2.3.4\n",
+ },
+ // time.Time gets a timestamp tag.
+ {
+ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)},
+ "a: 2015-02-24T18:19:39Z\n",
+ },
+ {
+ map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))},
+ "a: 2015-02-24T18:19:39Z\n",
+ },
+ {
+ // This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag.
+ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))},
+ "a: 2015-02-24T18:19:39.123456789-03:00\n",
+ },
+ // Ensure timestamp-like strings are quoted.
+ {
+ map[string]string{"a": "2015-02-24T18:19:39Z"},
+ "a: \"2015-02-24T18:19:39Z\"\n",
+ },
+
+ // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
+ {
+ map[string]string{"a": "b: c"},
+ "a: 'b: c'\n",
+ },
+
+	// Strings containing a hash mark ('#') should be quoted.
+ {
+ map[string]string{"a": "Hello #comment"},
+ "a: 'Hello #comment'\n",
+ },
+ {
+ map[string]string{"a": "你好 #comment"},
+ "a: '你好 #comment'\n",
+ },
+ {
+ map[string]interface{}{"a": jsonNumberT("5")},
+ "a: 5\n",
+ },
+ {
+ map[string]interface{}{"a": jsonNumberT("100.5")},
+ "a: 100.5\n",
+ },
+ {
+ map[string]interface{}{"a": jsonNumberT("bogus")},
+ "a: bogus\n",
+ },
+ // Ensure that strings do not wrap
+ {
+ map[string]string{"a": "abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 "},
+ "a: 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 '\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ defer os.Setenv("TZ", os.Getenv("TZ"))
+ os.Setenv("TZ", "UTC")
+ for i, item := range marshalTests {
+ c.Logf("test %d: %q", i, item.data)
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+func (s *S) TestEncoderSingleDocument(c *C) {
+ for i, item := range marshalTests {
+ c.Logf("test %d. %q", i, item.data)
+ var buf bytes.Buffer
+ enc := yaml.NewEncoder(&buf)
+ err := enc.Encode(item.value)
+ c.Assert(err, Equals, nil)
+ err = enc.Close()
+ c.Assert(err, Equals, nil)
+ c.Assert(buf.String(), Equals, item.data)
+ }
+}
+
+func (s *S) TestEncoderMultipleDocuments(c *C) {
+ var buf bytes.Buffer
+ enc := yaml.NewEncoder(&buf)
+ err := enc.Encode(map[string]string{"a": "b"})
+ c.Assert(err, Equals, nil)
+ err = enc.Encode(map[string]string{"c": "d"})
+ c.Assert(err, Equals, nil)
+ err = enc.Close()
+ c.Assert(err, Equals, nil)
+ c.Assert(buf.String(), Equals, "a: b\n---\nc: d\n")
+}
+
+func (s *S) TestEncoderWriteError(c *C) {
+ enc := yaml.NewEncoder(errorWriter{})
+ err := enc.Encode(map[string]string{"a": "b"})
+ c.Assert(err, ErrorMatches, `yaml: write error: some write error`) // Data not flushed yet
+}
+
+type errorWriter struct{}
+
+func (errorWriter) Write([]byte) (int, error) {
+ return 0, fmt.Errorf("some write error")
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+ panic string
+}{{
+ value: &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
+}, {
+ value: &struct {
+ A int
+ B map[string]int ",inline"
+ }{1, map[string]int{"a": 2}},
+ panic: `Can't have key "a" in inlined map; conflicts with struct field`,
+}}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ if item.panic != "" {
+ c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
+ } else {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+ }
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+var marshalerTests = []struct {
+ data string
+ value interface{}
+}{
+ {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", []interface{}{1, "A"}},
+ {"_: 10\n", 10},
+ {"_: null\n", nil},
+ {"_: BAR!\n", "BAR!"},
+}
+
+type marshalerType struct {
+ value interface{}
+}
+
+func (o marshalerType) MarshalText() ([]byte, error) {
+ panic("MarshalText called on type with MarshalYAML")
+}
+
+func (o marshalerType) MarshalYAML() (interface{}, error) {
+ return o.value, nil
+}
+
+type marshalerValue struct {
+ Field marshalerType "_"
+}
+
+func (s *S) TestMarshaler(c *C) {
+ for _, item := range marshalerTests {
+ obj := &marshalerValue{}
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestMarshalerWholeDocument(c *C) {
+ obj := &marshalerType{}
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+type failingMarshaler struct{}
+
+func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
+ return nil, failingErr
+}
+
+func (s *S) TestMarshalerError(c *C) {
+ _, err := yaml.Marshal(&failingMarshaler{})
+ c.Assert(err, Equals, failingErr)
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/0001",
+ "a/002",
+ "a/3",
+ "a/10",
+ "a/11",
+ "a/0012",
+ "a/100",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d7",
+ "d7abc",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
+
+func newTime(t time.Time) *time.Time {
+ return &t
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/example_embedded_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/example_embedded_test.go
new file mode 100644
index 00000000000..171c0931a19
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/example_embedded_test.go
@@ -0,0 +1,41 @@
+package yaml_test
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+// An example showing how to unmarshal embedded
+// structs from YAML.
+
+type StructA struct {
+ A string `yaml:"a"`
+}
+
+type StructB struct {
+ // Embedded structs are not treated as embedded in YAML by default. To do that,
+ // add the ",inline" annotation below
+ StructA `yaml:",inline"`
+ B string `yaml:"b"`
+}
+
+var data = `
+a: a string from struct A
+b: a string from struct B
+`
+
+func ExampleUnmarshal_embedded() {
+ var b StructB
+
+ err := yaml.Unmarshal([]byte(data), &b)
+ if err != nil {
+ log.Fatalf("cannot unmarshal data: %v", err)
+ }
+ fmt.Println(b.A)
+ fmt.Println(b.B)
+ // Output:
+ // a string from struct A
+ // a string from struct B
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/go.mod b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/go.mod
new file mode 100644
index 00000000000..1934e876945
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v2"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/limit_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/limit_test.go
new file mode 100644
index 00000000000..8d8ec2d2245
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/limit_test.go
@@ -0,0 +1,128 @@
+package yaml_test
+
+import (
+ "strings"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v2"
+)
+
+var limitTests = []struct {
+ name string
+ data []byte
+ error string
+}{
+ {
+ name: "1000kb of maps with 100 aliases",
+ data: []byte(`{a: &a [{a}` + strings.Repeat(`,{a}`, 1000*1024/4-100) + `], b: &b [*a` + strings.Repeat(`,*a`, 99) + `]}`),
+ error: "yaml: document contains excessive aliasing",
+ }, {
+ name: "1000kb of deeply nested slices",
+ data: []byte(strings.Repeat(`[`, 1000*1024)),
+ error: "yaml: exceeded max depth of 10000",
+ }, {
+ name: "1000kb of deeply nested maps",
+ data: []byte("x: " + strings.Repeat(`{`, 1000*1024)),
+ error: "yaml: exceeded max depth of 10000",
+ }, {
+ name: "1000kb of deeply nested indents",
+ data: []byte(strings.Repeat(`- `, 1000*1024)),
+ error: "yaml: exceeded max depth of 10000",
+ }, {
+ name: "1000kb of 1000-indent lines",
+ data: []byte(strings.Repeat(strings.Repeat(`- `, 1000)+"\n", 1024/2)),
+ },
+ {name: "1kb of maps", data: []byte(`a: &a [{a}` + strings.Repeat(`,{a}`, 1*1024/4-1) + `]`)},
+ {name: "10kb of maps", data: []byte(`a: &a [{a}` + strings.Repeat(`,{a}`, 10*1024/4-1) + `]`)},
+ {name: "100kb of maps", data: []byte(`a: &a [{a}` + strings.Repeat(`,{a}`, 100*1024/4-1) + `]`)},
+ {name: "1000kb of maps", data: []byte(`a: &a [{a}` + strings.Repeat(`,{a}`, 1000*1024/4-1) + `]`)},
+ {name: "1000kb slice nested at max-depth", data: []byte(strings.Repeat(`[`, 10000) + `1` + strings.Repeat(`,1`, 1000*1024/2-20000-1) + strings.Repeat(`]`, 10000))},
+ {name: "1000kb slice nested in maps at max-depth", data: []byte("{a,b:\n" + strings.Repeat(" {a,b:", 10000-2) + ` [1` + strings.Repeat(",1", 1000*1024/2-6*10000-1) + `]` + strings.Repeat(`}`, 10000-1))},
+ {name: "1000kb of 10000-nested lines", data: []byte(strings.Repeat(`- `+strings.Repeat(`[`, 10000)+strings.Repeat(`]`, 10000)+"\n", 1000*1024/20000))},
+}
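+
+// For illustration: each ",{a}" fragment above is 4 bytes, so a repeat count
+// of 1000*1024/4 produces roughly 1000kb of input; the small offsets in the
+// constants account for the surrounding brackets and aliases.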
+
+func (s *S) TestLimits(c *C) {
+ if testing.Short() {
+ return
+ }
+ for _, tc := range limitTests {
+ var v interface{}
+ err := yaml.Unmarshal(tc.data, &v)
+ if len(tc.error) > 0 {
+ c.Assert(err, ErrorMatches, tc.error, Commentf("testcase: %s", tc.name))
+ } else {
+ c.Assert(err, IsNil, Commentf("testcase: %s", tc.name))
+ }
+ }
+}
+
+func Benchmark1000KB100Aliases(b *testing.B) {
+ benchmark(b, "1000kb of maps with 100 aliases")
+}
+func Benchmark1000KBDeeplyNestedSlices(b *testing.B) {
+ benchmark(b, "1000kb of deeply nested slices")
+}
+func Benchmark1000KBDeeplyNestedMaps(b *testing.B) {
+ benchmark(b, "1000kb of deeply nested maps")
+}
+func Benchmark1000KBDeeplyNestedIndents(b *testing.B) {
+ benchmark(b, "1000kb of deeply nested indents")
+}
+func Benchmark1000KB1000IndentLines(b *testing.B) {
+ benchmark(b, "1000kb of 1000-indent lines")
+}
+func Benchmark1KBMaps(b *testing.B) {
+ benchmark(b, "1kb of maps")
+}
+func Benchmark10KBMaps(b *testing.B) {
+ benchmark(b, "10kb of maps")
+}
+func Benchmark100KBMaps(b *testing.B) {
+ benchmark(b, "100kb of maps")
+}
+func Benchmark1000KBMaps(b *testing.B) {
+ benchmark(b, "1000kb of maps")
+}
+
+func BenchmarkDeepSlice(b *testing.B) {
+ benchmark(b, "1000kb slice nested at max-depth")
+}
+
+func BenchmarkDeepFlow(b *testing.B) {
+ benchmark(b, "1000kb slice nested in maps at max-depth")
+}
+
+func Benchmark1000KBMaxDepthNested(b *testing.B) {
+ benchmark(b, "1000kb of 10000-nested lines")
+}
+
+func benchmark(b *testing.B, name string) {
+ for _, t := range limitTests {
+ if t.name != name {
+ continue
+ }
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ var v interface{}
+ err := yaml.Unmarshal(t.data, &v)
+ if len(t.error) > 0 {
+ if err == nil {
+ b.Errorf("expected error, got none")
+ } else if err.Error() != t.error {
+ b.Errorf("expected error '%s', got '%s'", t.error, err.Error())
+ }
+ } else {
+ if err != nil {
+ b.Errorf("unexpected error: %v", err)
+ }
+ }
+ }
+
+ return
+ }
+
+ b.Errorf("testcase %q not found", name)
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/parserc.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 00000000000..81d05dfe573
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
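+//
+// For illustration: parsing the document "a: 1" produces the event sequence
+// STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR("a"), SCALAR("1"),
+// MAPPING-END, DOCUMENT-END, STREAM-END.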
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
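+
+// For example (illustrative):
+//
+//	key:
+//	- item 1
+//	- item 2
+//
+// is parsed through this state: the two entries form a sequence even though
+// no BLOCK-SEQUENCE-START token was ever produced for it.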
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
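+
+// For example, a mapping entry written as "key:" with no value causes the
+// parser to emit one of these empty plain scalars at the position just
+// after the ':'.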
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
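+
+// For example, with these defaults a "!!str" tag resolves to
+// "tag:yaml.org,2002:str", while a primary-handle tag such as "!local"
+// resolves to "!local" (prefix "!" plus suffix "local").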
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
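+
+// For example, a document starting with "%YAML 1.1" produces a version
+// directive of {major: 1, minor: 1}; any other version, such as
+// "%YAML 1.2", is rejected above as incompatible.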
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
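+
+// For example, repeating "%TAG !e! tag:example.com,2000:" within one
+// document fails with the duplicate-directive error above, while re-adding
+// the default "!" and "!!" handles (allow_duplicates=true) is a no-op.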
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/readerc.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000000..7c1f5fac3db
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking for a BOM. If no BOM is
+// found, UTF-8 is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
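+
+// For example, input beginning with the bytes 0xFF 0xFE is decoded as
+// UTF-16LE and the two BOM bytes are consumed; input with no recognizable
+// BOM falls through to UTF-8 and nothing is consumed.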
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
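+
+// Note that a short read is not an error here: the read handler may return
+// fewer bytes than requested, and io.EOF merely latches parser.eof so the
+// decoder can drain whatever remains in the raw buffer.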
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that must be the case, and there are tests that depend on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every caller
+ // that relies on the guaranteed length ends up either panicking
+ // (in this Go port) or accessing invalid memory (as in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer; otherwise callers
+ // that rely on the guaranteed length end up either panicking (in this
+ // Go port) or accessing invalid memory (as in C). Pad with NULs here
+ // because the EOF case above may break out of the loop early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/resolve.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000000..4120e0c9160
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+ {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+ {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+ {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+ {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+ {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", yaml_MERGE_TAG, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ // TODO This can easily be made faster and produce less garbage.
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
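+
+// For example (illustrative):
+//
+//	shortTag("tag:yaml.org,2002:str") == "!!str"
+//	longTag("!!int") == "tag:yaml.org,2002:int"
+//	longTag("!custom") == "!custom" // tags without the "!!" prefix pass through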
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+ return
+ case yaml_FLOAT_TAG:
+ if rtag == yaml_INT_TAG {
+ switch v := out.(type) {
+ case int64:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ case int:
+ rtag = yaml_FLOAT_TAG
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == yaml_TIMESTAMP_TAG {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return yaml_TIMESTAMP_TAG, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ }
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return yaml_STR_TAG, in
+}
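+
+// Illustrative resolutions for an untagged plain scalar (tag == ""):
+//
+//	resolve("", "true")  -> !!bool  true
+//	resolve("", "12")    -> !!int   12
+//	resolve("", "12.5")  -> !!float 12.5
+//	resolve("", "~")     -> !!null  nil
+//	resolve("", "hello") -> !!str   "hello"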
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
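+
+// For example, encodeBase64("hi") returns "aGk=" on a single line, while
+// longer inputs are broken roughly every 70 output bytes.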
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
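+
+// For example (given the formats above):
+//
+//	parseTimestamp("2001-12-14")          // ok: date only
+//	parseTimestamp("2001-12-14 21:59:43") // ok: space separated, no zone
+//	parseTimestamp("14-12-2001")          // not ok: does not start "YYYY-"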
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/scannerc.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000000..0b9bb6030a0
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Only two aspects of Scanning might be called "clever"; the rest is quite
+// straightforward. These are "block collection start" and "simple keys",
+// and both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
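+
+// For example, a CR LF pair is folded into a single '\n' in s, a NEL
+// (0xC2 0x85) likewise becomes '\n', while the LS and PS breaks (U+2028
+// and U+2029) are copied through as their raw three-byte sequences.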
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ if parser.tokens_head != len(parser.tokens) {
+ // If queue is non-empty, check if any potential simple key may
+ // occupy the head position.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
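+
+// In other words, a token may not leave the queue while an unresolved
+// simple key could still turn the head token into a KEY; the loop above
+// keeps fetching until that ambiguity is settled.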
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by this point, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
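+// yaml_simple_key_is_valid reports whether a previously saved simple key
+// can still become a KEY token. The second result is false only when a
+// scanner error has been recorded during the check.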
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
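+ // A key that started on an earlier line, or more than 1024 characters
+ // back, can no longer be a simple key.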
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
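+ // token_number is the absolute position at which a KEY token would
+ // later be inserted: tokens already handed to the parser plus tokens
+ // still sitting in the queue.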
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level
+// if the current column is greater than the indentation level. In this
+// case, append or insert the specified token into the token queue.
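+//
+// For example, scanning
+//
+//     a:
+//       b: 1
+//
+// rolls the indent from -1 to 0 at 'a' and from 0 to 2 at 'b', emitting
+// a BLOCK-MAPPING-START token each time.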
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
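+ // The stack keeps one entry per flow level, plus this one for the
+ // block context.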
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser is
+ // able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
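+ // The saved token number is absolute; convert it to an index relative
+ // to the live queue before inserting.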
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be a part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
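+ // w == 1024 is a sentinel meaning the UTF-8 width is not known yet; it
+ // is replaced by the real sequence length (1-4) once the leading octet
+ // has been decoded.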
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
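+ // chomping: -1 ('-') strips all trailing line breaks, +1 ('+') keeps
+ // them, and 0 (no indicator) clips to a single break. increment holds
+ // an explicit indentation indicator, e.g. the 2 in "|2-".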
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
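+ // *indent == 0 means no explicit indentation indicator was given, so
+ // track the deepest column seen and auto-detect the level below.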
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
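+ // Encode the code point as UTF-8 by hand (one to four bytes).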
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
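+ // In the block context a plain scalar must be indented at least one
+ // column past the enclosing indentation level.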
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/sorter.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000000..4c45e660a8f
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
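+ // Order: numeric-looking values (ints, uints, floats, bools) compare
+ // numerically; mismatched kinds compare by kind; strings use a natural
+ // order in which "a2" sorts before "a10".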
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
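+ // If the first differing rune may belong to a digit run with leading
+ // zeros, scan back through the shared digit prefix: a non-zero digit
+ // there means both runs continue one larger number, so both counters
+ // start from a non-zero seed.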
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
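+
+// Illustrative note (not part of the upstream source): sorting string keys
+// through keyList yields a "natural" order in which embedded digit runs
+// compare numerically and non-letters sort before letters. For example,
+// sorting the keys {"b", "a10", "a2", "10", "2"} produces
+// ["2", "10", "a2", "a10", "b"].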
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/suite_test.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/suite_test.go
new file mode 100644
index 00000000000..c5cf1ed4f6e
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/writerc.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000000..a2dde608cb7
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yaml.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000000..89650e293ac
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
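+
+// Illustrative sketch (not part of the upstream source): MapSlice keeps keys
+// in declaration order through Marshal, unlike a plain map whose keys are
+// sorted:
+//
+//     ms := yaml.MapSlice{{Key: "z", Value: 1}, {Key: "a", Value: 2}}
+//     out, _ := yaml.Marshal(ms)
+//     // out == []byte("z: 1\na: 2\n"), keys not re-sorted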
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that fields found in the data
+// that do not have corresponding struct members, and mapping keys that are
+// duplicates, result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
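+
+// Illustrative example (not part of the upstream source), assuming
+//
+//     type T struct {
+//         A int `yaml:"a"`
+//     }
+//     var t T
+//
+// yaml.Unmarshal([]byte("a: 1\nextra: 2"), &t) succeeds and ignores the
+// unknown "extra" key, while yaml.UnmarshalStrict returns an error for it.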
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
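+
+// Illustrative sketch (not part of the upstream source): draining a
+// multi-document stream from a client package until Decode reports io.EOF.
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     for {
+//         var doc map[string]int
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             return err
+//         }
+//         // use doc
+//     }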
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be omitted if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the second and subsequent
+// documents will be preceded by a "---" document separator, but the first
+// will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
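+
+// Illustrative sketch (not part of the upstream source): encoding two
+// documents to one stream; Close flushes the tail of the output.
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"a": 2})
+//     enc.Close()
+//     // buf holds roughly "a: 1\n---\na: 2\n"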
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
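+
+// Illustrative note (not part of the upstream source): decoding "a: true"
+// into a struct whose field A is an int leaves the struct partially filled
+// and returns a *TypeError whose text reads roughly:
+//
+//     yaml: unmarshal errors:
+//       line 1: cannot unmarshal !!bool `true` into int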
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
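+
+// Illustrative note (not part of the upstream source): for a field declared
+// as
+//
+//     Name string `yaml:"n,omitempty,flow"`
+//
+// getStructInfo records a fieldInfo with Key "n" and both OmitEmpty and
+// Flow set, while a tag of "-" drops the field entirely.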
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
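+
+// Illustrative note (not part of the upstream source): time.Time implements
+// IsZeroer, so a field such as
+//
+//     Start time.Time `yaml:"start,omitempty"`
+//
+// is omitted from the output whenever Start.IsZero() reports true.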
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlh.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000000..f6a9c8e34b1
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the source. It should write at most len(buffer) bytes into buffer and
+// return the number of bytes written.
+//
+// At the end of the input the handler should return 0 together with io.EOF;
+// any other non-nil error makes the parser report a reader error.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
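+
+// Illustrative sketch (not part of the upstream source): a handler that
+// feeds the parser from its input_reader field, assuming the reader has
+// been wired up elsewhere. io.EOF from Read signals end of input.
+//
+//     func demoReadHandler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//         return parser.input_reader.Read(buffer)
+//     }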
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. It should write all of buffer and
+// return nil on success; a non-nil error makes the emitter report a
+// writer error.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
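+
+// Illustrative sketch (not part of the upstream source): a handler that
+// forwards the emitter's buffer to its output_writer field, assuming the
+// writer has been wired up elsewhere.
+//
+//     func demoWriteHandler(emitter *yaml_emitter_t, buffer []byte) error {
+//         _, err := emitter.output_writer.Write(buffer)
+//         return err
+//     }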
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/src/mongo/gotools/src/github.com/mongodb/mongo-tools/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
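+
+// Illustrative note (not part of the upstream source): width supports
+// rune-wise iteration over the UTF-8 working buffer:
+//
+//     for i := 0; i < len(b); i += width(b[i]) {
+//         // inspect the character starting at b[i]
+//     }
+//
+// A malformed start byte makes width return 0, so the input must be valid
+// UTF-8 (or checked first) to avoid looping forever.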