summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/legacy26/buildscripts/buildlogger.py480
-rw-r--r--test/legacy26/buildscripts/cleanbb.py105
-rwxr-xr-xtest/legacy26/buildscripts/smoke.py1307
-rw-r--r--test/legacy26/buildscripts/utils.py230
-rw-r--r--test/legacy26/jstests/libs/authTestsKey1
-rw-r--r--test/legacy26/jstests/libs/ca.pem17
-rw-r--r--test/legacy26/jstests/libs/client.pem101
-rw-r--r--test/legacy26/jstests/libs/client_revoked.pem34
-rw-r--r--test/legacy26/jstests/libs/cluster-cert.pem101
-rw-r--r--test/legacy26/jstests/libs/command_line/test_parsed_options.js202
-rw-r--r--test/legacy26/jstests/libs/config_files/disable_noscripting.ini1
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_auth.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_autosplit.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_httpinterface.json7
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_journal.json7
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_objcheck.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_paranoia.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_prealloc.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_scripting.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/enable_unixsocket.json7
-rw-r--r--test/legacy26/jstests/libs/config_files/set_profiling.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/set_replsetname.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/set_shardingrole.json5
-rw-r--r--test/legacy26/jstests/libs/config_files/set_verbosity.json5
-rw-r--r--test/legacy26/jstests/libs/crl.pem10
-rw-r--r--test/legacy26/jstests/libs/crl_client_revoked.pem12
-rw-r--r--test/legacy26/jstests/libs/crl_expired.pem10
-rw-r--r--test/legacy26/jstests/libs/dur_checksum_bad_first.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy26/jstests/libs/dur_checksum_bad_last.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy26/jstests/libs/dur_checksum_good.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy26/jstests/libs/fts.js18
-rw-r--r--test/legacy26/jstests/libs/fun.js32
-rw-r--r--test/legacy26/jstests/libs/geo_near_random.js99
-rw-r--r--test/legacy26/jstests/libs/grid.js171
-rw-r--r--test/legacy26/jstests/libs/key11
-rw-r--r--test/legacy26/jstests/libs/key21
-rw-r--r--test/legacy26/jstests/libs/localhostnameCN.pem101
-rw-r--r--test/legacy26/jstests/libs/localhostnameSAN.pem100
-rw-r--r--test/legacy26/jstests/libs/mockkrb5.conf13
-rw-r--r--test/legacy26/jstests/libs/mockservice.keytabbin0 -> 442 bytes
-rw-r--r--test/legacy26/jstests/libs/mockuser.keytabbin0 -> 340 bytes
-rw-r--r--test/legacy26/jstests/libs/network.js37
-rw-r--r--test/legacy26/jstests/libs/parallelTester.js259
-rw-r--r--test/legacy26/jstests/libs/password_protected.pem51
-rw-r--r--test/legacy26/jstests/libs/server.pem34
-rw-r--r--test/legacy26/jstests/libs/slow_weekly_util.js20
-rw-r--r--test/legacy26/jstests/libs/smoke.pem50
-rw-r--r--test/legacy26/jstests/libs/test_background_ops.js340
-rw-r--r--test/legacy26/jstests/libs/testconfig4
-rw-r--r--test/legacy26/jstests/libs/testconfig.json4
-rw-r--r--test/legacy26/jstests/libs/trace_missing_docs.js90
-rwxr-xr-xtest/legacy26/jstests/misc/biginsert.js18
-rw-r--r--test/legacy26/jstests/replsets/rslib.js115
-rw-r--r--test/legacy26/jstests/tool/csv1.js42
-rw-r--r--test/legacy26/jstests/tool/csvexport1.js65
-rw-r--r--test/legacy26/jstests/tool/csvexport2.js31
-rw-r--r--test/legacy26/jstests/tool/csvimport1.js40
-rw-r--r--test/legacy26/jstests/tool/data/a.tsv2
-rw-r--r--test/legacy26/jstests/tool/data/csvimport1.csv8
-rw-r--r--test/legacy26/jstests/tool/data/dumprestore6/foo.bsonbin0 -> 44 bytes
-rw-r--r--test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bsonbin0 -> 144 bytes
-rw-r--r--test/legacy26/jstests/tool/dumpauth.js38
-rw-r--r--test/legacy26/jstests/tool/dumpfilename1.js14
-rw-r--r--test/legacy26/jstests/tool/dumprestore1.js23
-rw-r--r--test/legacy26/jstests/tool/dumprestore10.js63
-rw-r--r--test/legacy26/jstests/tool/dumprestore3.js60
-rw-r--r--test/legacy26/jstests/tool/dumprestore4.js42
-rw-r--r--test/legacy26/jstests/tool/dumprestore6.js27
-rw-r--r--test/legacy26/jstests/tool/dumprestore7.js66
-rw-r--r--test/legacy26/jstests/tool/dumprestore8.js105
-rw-r--r--test/legacy26/jstests/tool/dumprestore9.js79
-rw-r--r--test/legacy26/jstests/tool/dumprestoreWithNoOptions.js107
-rw-r--r--test/legacy26/jstests/tool/dumprestore_auth.js35
-rw-r--r--test/legacy26/jstests/tool/dumprestore_auth2.js96
-rw-r--r--test/legacy26/jstests/tool/dumprestore_auth3.js199
-rw-r--r--test/legacy26/jstests/tool/dumpsecondary.js38
-rw-r--r--test/legacy26/jstests/tool/exportimport1.js66
-rw-r--r--test/legacy26/jstests/tool/exportimport3.js27
-rw-r--r--test/legacy26/jstests/tool/exportimport4.js57
-rw-r--r--test/legacy26/jstests/tool/exportimport5.js82
-rw-r--r--test/legacy26/jstests/tool/exportimport6.js26
-rw-r--r--test/legacy26/jstests/tool/exportimport_bigarray.js62
-rw-r--r--test/legacy26/jstests/tool/exportimport_date.js49
-rw-r--r--test/legacy26/jstests/tool/files1.js27
-rw-r--r--test/legacy26/jstests/tool/oplog1.js26
-rw-r--r--test/legacy26/jstests/tool/oplog_all_ops.js61
-rw-r--r--test/legacy26/jstests/tool/restorewithauth.js113
-rw-r--r--test/legacy26/jstests/tool/stat1.js22
-rw-r--r--test/legacy26/jstests/tool/tool1.js44
-rw-r--r--test/legacy26/jstests/tool/tool_replset.js89
-rw-r--r--test/legacy26/jstests/tool/tsv1.js32
-rw-r--r--test/legacy28/buildscripts/buildlogger.py479
-rw-r--r--test/legacy28/buildscripts/cleanbb.py105
-rwxr-xr-xtest/legacy28/buildscripts/smoke.py1343
-rw-r--r--test/legacy28/buildscripts/utils.py235
-rw-r--r--test/legacy28/jstests/libs/analyze_plan.js80
-rw-r--r--test/legacy28/jstests/libs/authTestsKey1
-rw-r--r--test/legacy28/jstests/libs/badSAN.pem48
-rw-r--r--test/legacy28/jstests/libs/ca.pem48
-rw-r--r--test/legacy28/jstests/libs/client.pem48
-rw-r--r--test/legacy28/jstests/libs/client_revoked.pem48
-rw-r--r--test/legacy28/jstests/libs/cluster_cert.pem48
-rw-r--r--test/legacy28/jstests/libs/command_line/test_parsed_options.js214
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_auth.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_dur.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_httpinterface.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_ipv6.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_journal.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_jsonp.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_jsonp.json7
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noauth.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noautosplit.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_nodur.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_nojournal.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noprealloc.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_noscripting.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_objcheck.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/disable_rest_interface.json7
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_auth.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_autosplit.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_httpinterface.json7
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_journal.json7
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_objcheck.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_paranoia.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_prealloc.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_scripting.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/enable_unixsocket.json7
-rw-r--r--test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini1
-rw-r--r--test/legacy28/jstests/libs/config_files/set_component_verbosity.json16
-rw-r--r--test/legacy28/jstests/libs/config_files/set_profiling.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/set_replsetname.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/set_shardingrole.json5
-rw-r--r--test/legacy28/jstests/libs/config_files/set_verbosity.json5
-rw-r--r--test/legacy28/jstests/libs/crl.pem38
-rw-r--r--test/legacy28/jstests/libs/crl_client_revoked.pem41
-rw-r--r--test/legacy28/jstests/libs/crl_expired.pem38
-rw-r--r--test/legacy28/jstests/libs/dur_checksum_bad_first.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy28/jstests/libs/dur_checksum_bad_last.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy28/jstests/libs/dur_checksum_good.journalbin0 -> 32768 bytes
-rw-r--r--test/legacy28/jstests/libs/expired.pem48
-rw-r--r--test/legacy28/jstests/libs/fts.js18
-rw-r--r--test/legacy28/jstests/libs/geo_near_random.js101
-rw-r--r--test/legacy28/jstests/libs/host_ipaddr.js38
-rw-r--r--test/legacy28/jstests/libs/key11
-rw-r--r--test/legacy28/jstests/libs/key21
-rw-r--r--test/legacy28/jstests/libs/localhostnameCN.pem48
-rw-r--r--test/legacy28/jstests/libs/localhostnameSAN.pem49
-rw-r--r--test/legacy28/jstests/libs/mockkrb5.conf13
-rw-r--r--test/legacy28/jstests/libs/mockservice.keytabbin0 -> 442 bytes
-rw-r--r--test/legacy28/jstests/libs/mockuser.keytabbin0 -> 340 bytes
-rw-r--r--test/legacy28/jstests/libs/not_yet_valid.pem48
-rw-r--r--test/legacy28/jstests/libs/parallelTester.js259
-rw-r--r--test/legacy28/jstests/libs/password_protected.pem51
-rw-r--r--test/legacy28/jstests/libs/server.pem48
-rwxr-xr-xtest/legacy28/jstests/libs/servers.js957
-rw-r--r--test/legacy28/jstests/libs/servers_misc.js357
-rw-r--r--test/legacy28/jstests/libs/slow_weekly_util.js20
-rw-r--r--test/legacy28/jstests/libs/smoke.pem48
-rw-r--r--test/legacy28/jstests/libs/test_background_ops.js340
-rw-r--r--test/legacy28/jstests/libs/testconfig6
-rw-r--r--test/legacy28/jstests/libs/testconfig.json4
-rw-r--r--test/legacy28/jstests/libs/trace_missing_docs.js90
-rw-r--r--test/legacy28/jstests/replsets/rslib.js115
-rw-r--r--test/legacy28/jstests/tool/csv1.js43
-rw-r--r--test/legacy28/jstests/tool/csvexport1.js65
-rw-r--r--test/legacy28/jstests/tool/csvexport2.js32
-rw-r--r--test/legacy28/jstests/tool/csvimport1.js41
-rw-r--r--test/legacy28/jstests/tool/data/a.tsv2
-rw-r--r--test/legacy28/jstests/tool/data/csvimport1.csv8
-rw-r--r--test/legacy28/jstests/tool/data/dumprestore6/foo.bsonbin0 -> 44 bytes
-rw-r--r--test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bsonbin0 -> 144 bytes
-rw-r--r--test/legacy28/jstests/tool/dumpauth.js39
-rw-r--r--test/legacy28/jstests/tool/dumpfilename1.js13
-rw-r--r--test/legacy28/jstests/tool/dumprestore1.js32
-rw-r--r--test/legacy28/jstests/tool/dumprestore10.js64
-rw-r--r--test/legacy28/jstests/tool/dumprestore3.js61
-rw-r--r--test/legacy28/jstests/tool/dumprestore4.js43
-rw-r--r--test/legacy28/jstests/tool/dumprestore6.js28
-rw-r--r--test/legacy28/jstests/tool/dumprestore7.js66
-rw-r--r--test/legacy28/jstests/tool/dumprestore8.js106
-rw-r--r--test/legacy28/jstests/tool/dumprestore9.js79
-rw-r--r--test/legacy28/jstests/tool/dumprestoreWithNoOptions.js112
-rw-r--r--test/legacy28/jstests/tool/dumprestore_auth.js99
-rw-r--r--test/legacy28/jstests/tool/dumprestore_auth2.js98
-rw-r--r--test/legacy28/jstests/tool/dumprestore_auth3.js200
-rw-r--r--test/legacy28/jstests/tool/dumprestore_excludecollections.js112
-rw-r--r--test/legacy28/jstests/tool/dumpsecondary.js39
-rw-r--r--test/legacy28/jstests/tool/exportimport1.js67
-rw-r--r--test/legacy28/jstests/tool/exportimport3.js28
-rw-r--r--test/legacy28/jstests/tool/exportimport4.js57
-rw-r--r--test/legacy28/jstests/tool/exportimport5.js82
-rw-r--r--test/legacy28/jstests/tool/exportimport6.js27
-rw-r--r--test/legacy28/jstests/tool/exportimport_bigarray.js59
-rw-r--r--test/legacy28/jstests/tool/exportimport_date.js50
-rw-r--r--test/legacy28/jstests/tool/exportimport_minkey_maxkey.js38
-rw-r--r--test/legacy28/jstests/tool/files1.js28
-rw-r--r--test/legacy28/jstests/tool/oplog1.js27
-rw-r--r--test/legacy28/jstests/tool/oplog_all_ops.js62
-rw-r--r--test/legacy28/jstests/tool/restorewithauth.js114
-rw-r--r--test/legacy28/jstests/tool/stat1.js18
-rw-r--r--test/legacy28/jstests/tool/tool1.js44
-rw-r--r--test/legacy28/jstests/tool/tool_replset.js89
-rw-r--r--test/legacy28/jstests/tool/tsv1.js33
214 files changed, 14016 insertions, 0 deletions
diff --git a/test/legacy26/buildscripts/buildlogger.py b/test/legacy26/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..a31b3e2dfa1
--- /dev/null
+++ b/test/legacy26/buildscripts/buildlogger.py
@@ -0,0 +1,480 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from settings.py
+# which will be one, two, or three directories up
+# from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+ and returns none.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches of 100 lines or 10s to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+ while not finish_test(build_id, test_id, failed) and tries > 5:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches of 100 lines or 10s to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
+
diff --git a/test/legacy26/buildscripts/cleanbb.py b/test/legacy26/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/test/legacy26/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/test/legacy26/buildscripts/smoke.py b/test/legacy26/buildscripts/smoke.py
new file mode 100755
index 00000000000..7d5a825a80b
--- /dev/null
+++ b/test/legacy26/buildscripts/smoke.py
@@ -0,0 +1,1307 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+
+from pymongo import Connection
+from pymongo.errors import OperationFailure
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
# TODO clean this up so we don't need globals...
# Paths/executables/ports below are filled in by set_globals() from the
# parsed command-line options before any test runs.
mongo_repo = os.getcwd() #'./'
failfile = os.path.join(mongo_repo, 'failfile.smoke')
test_path = None
mongod_executable = None
mongod_port = None
shell_executable = None
continue_on_failure = None
file_of_commands_mode = False
start_mongod = True
temp_path = None
# Restart mongod after every N database tests to clear accumulated data
# (1 = restart after each test); 0/None disables the periodic restart.
clean_every_n_tests = 1
clean_whole_dbroot = False

# Bookkeeping for the run: all tests, passing tests, and failures
# (losers maps test path -> exit status).
tests = []
winners = []
losers = {}
fails = [] # like losers but in format of tests

# For replication hash checking
replicated_collections = []
lost_in_slave = []
lost_in_master = []
screwy_in_slave = {}

smoke_db_prefix = ''
small_oplog = False
small_oplog_rs = False

# JSON report accumulated across the run; written to report_file (if set)
# by report().
test_report = { "results": [] }
report_file = None
+
+# This class just implements the with statement API, for a sneaky
+# purpose below.
+class Nothing(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, type, value, traceback):
+ return not isinstance(value, Exception)
+
def buildlogger(cmd, is_global=False):
    """Wrap `cmd` (an argv list) with buildscripts/buildlogger.py when the
    environment variable MONGO_USE_BUILDLOGGER is 'true', so that output
    is shipped to the buildlogger machine; otherwise return `cmd` as-is.

    is_global -- pass -g to buildlogger.py (for global/long-running
                 processes such as the shared mongod).
    """
    enabled = os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true'
    if not enabled:
        return cmd
    wrapper = [utils.find_python(), 'buildscripts/buildlogger.py']
    if is_global:
        wrapper.append('-g')
    return wrapper + cmd
+
+
def clean_dbroot(dbroot="", nokill=False):
    """Wipe test data before (re)starting a mongod.

    With --with-cleanbb (clean_whole_dbroot) and outside small-oplog mode
    the entire <smoke_db_prefix>/data/db tree is cleaned; otherwise only
    the given `dbroot`.  Delegates the actual work to cleanbb.cleanup().
    """
    target = dbroot
    if clean_whole_dbroot and not small_oplog:
        target = os.path.normpath(smoke_db_prefix + "/data/db")
    if os.path.exists(target):
        print("clean_dbroot: %s" % target)
        cleanbb.cleanup(target, nokill)
+
+
class mongod(object):
    """Context manager owning a mongod child process for the smoke run.

    Keyword arguments (slave, small_oplog, small_oplog_rs, no_journal,
    no_preallocj, auth, authMechanism, keyFile, use_ssl, use_x509,
    set_parameters) are translated into mongod command-line flags by
    start().
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.proc = None   # Popen handle, populated by start()
        self.auth = False  # set True by start() when auth/x509 flags are used

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        try:
            self.stop()
        except Exception, e:
            print >> sys.stderr, "error shutting down mongod"
            print >> sys.stderr, e
        # Returning True suppresses Exception subclasses raised in the body
        # (mirrors the Nothing helper used when no mongod is started).
        return not isinstance(value, Exception)

    def ensure_test_dirs(self):
        # Directory layout the tests expect, rooted at smoke_db_prefix.
        utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
        utils.ensureDir(smoke_db_prefix + "/data/")
        utils.ensureDir(smoke_db_prefix + "/data/db/")

    def check_mongo_port(self, port=27017):
        # A successful TCP connect means something is listening on the
        # port; raises socket.error/timeout otherwise.
        sock = socket.socket()
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock.settimeout(1)
        sock.connect(("localhost", int(port)))
        sock.close()

    def is_mongod_up(self, port=mongod_port):
        # NOTE(review): the default binds the module-level mongod_port at
        # class-definition time (still None until set_globals() runs);
        # callers always pass the port explicitly -- confirm before
        # relying on the default.
        try:
            self.check_mongo_port(int(port))
            return True
        except Exception,e:
            print >> sys.stderr, e
            return False

    def did_mongod_start(self, port=mongod_port, timeout=300):
        # Poll once per second for up to `timeout` seconds.
        while timeout > 0:
            time.sleep(1)
            is_up = self.is_mongod_up(port)
            if is_up:
                return True
            timeout = timeout - 1
        print >> sys.stderr, "timeout starting mongod"
        return False

    def start(self):
        """Build the mongod argv from self.kwargs, launch it, and wait
        until it answers on its port (raises Exception on timeout)."""
        global mongod_port
        # NOTE(review): 'global mongod' is never assigned in this method;
        # looks vestigial -- confirm before removing.
        global mongod
        if self.proc:
            print >> sys.stderr, "probable bug: self.proc already set in start()"
            return
        self.ensure_test_dirs()
        dir_name = smoke_db_prefix + "/data/db/sconsTests/"
        self.port = int(mongod_port)
        self.slave = False
        if 'slave' in self.kwargs:
            # A slave gets its own dbpath and the next port up from master.
            dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
            srcport = mongod_port
            self.port += 1
            self.slave = True

        clean_dbroot(dbroot=dir_name, nokill=self.slave)
        utils.ensureDir(dir_name)

        argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
        # These parameters are alwas set for tests
        # SERVER-9137 Added httpinterface parameter to keep previous behavior
        argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
        if self.kwargs.get('small_oplog'):
            argv += ["--master", "--oplogSize", "511"]
        params = self.kwargs.get('set_parameters', None)
        if params:
            # Comma-separated list of name=value server parameters.
            for p in params.split(','): argv += ['--setParameter', p]
        if self.kwargs.get('small_oplog_rs'):
            argv += ["--replSet", "foo", "--oplogSize", "511"]
        if self.slave:
            argv += ['--slave', '--source', 'localhost:' + str(srcport)]
        if self.kwargs.get('no_journal'):
            argv += ['--nojournal']
        if self.kwargs.get('no_preallocj'):
            argv += ['--nopreallocj']
        if self.kwargs.get('auth'):
            argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
            authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
            if authMechanism != 'MONGODB-CR':
                argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
            self.auth = True
        if self.kwargs.get('keyFile'):
            argv += ['--keyFile', self.kwargs.get('keyFile')]
        if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
            argv += ['--sslMode', "requireSSL",
                     '--sslPEMKeyFile', 'jstests/libs/server.pem',
                     '--sslCAFile', 'jstests/libs/ca.pem',
                     '--sslWeakCertificateValidation']
        if self.kwargs.get('use_x509'):
            argv += ['--clusterAuthMode','x509'];
            self.auth = True
        print "running " + " ".join(argv)
        self.proc = self._start(buildlogger(argv, is_global=True))

        if not self.did_mongod_start(self.port):
            raise Exception("Failed to start mongod")

        if self.slave:
            # Block until the slave has a non-empty syncedTo entry for its
            # replication source.
            local = Connection(port=self.port, slave_okay=True).local
            synced = False
            while not synced:
                synced = True
                for source in local.sources.find(fields=["syncedTo"]):
                    synced = synced and "syncedTo" in source and source["syncedTo"]

    def _start(self, argv):
        """In most cases, just call subprocess.Popen(). On windows,
        add the started process to a new Job Object, so that any
        child processes of this process can be killed with a single
        call to TerminateJobObject (see self.stop()).
        """

        if os.sys.platform == "win32":
            # Create a job object with the "kill on job close"
            # flag; this is inherited by child processes (ie
            # the mongod started on our behalf by buildlogger)
            # and lets us terminate the whole tree of processes
            # rather than orphaning the mongod.
            import win32job

            # Magic number needed to allow job reassignment in Windows 7
            # see: MSDN - Process Creation Flags - ms684863
            CREATE_BREAKAWAY_FROM_JOB = 0x01000000

            proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)

            self.job_object = win32job.CreateJobObject(None, '')

            job_info = win32job.QueryInformationJobObject(
                self.job_object, win32job.JobObjectExtendedLimitInformation)
            job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
            win32job.SetInformationJobObject(
                self.job_object,
                win32job.JobObjectExtendedLimitInformation,
                job_info)

            win32job.AssignProcessToJobObject(self.job_object, proc._handle)

        else:
            proc = Popen(argv)

        return proc

    def stop(self):
        """Terminate the mongod (whole job tree on Windows) and wait for
        it to exit; prints a warning if start() was never called."""
        if not self.proc:
            print >> sys.stderr, "probable bug: self.proc unset in stop()"
            return
        try:
            if os.sys.platform == "win32":
                import win32job
                win32job.TerminateJobObject(self.job_object, -1)
                import time
                # Windows doesn't seem to kill the process immediately, so give it some time to die
                time.sleep(5)
            else:
                # This function not available in Python 2.5
                self.proc.terminate()
        except AttributeError:
            # Popen.terminate() missing (old Python): send SIGTERM directly.
            from os import kill
            kill(self.proc.pid, 15)
        self.proc.wait()
        sys.stderr.flush()
        sys.stdout.flush()

    def wait_for_repl(self):
        # w=2 forces the insert to be acknowledged by the slave as well,
        # i.e. blocks until replication has caught up (5 minute timeout).
        Connection(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+
class Bug(Exception):
    """An internal error in smoke.py itself, as opposed to a test failure."""

    def __str__(self):
        base = super(Bug, self).__str__()
        return 'bug in smoke.py: ' + base
+
class TestFailure(Exception):
    """Base class for all test-failure exceptions raised by runTest()."""
+
class TestExitFailure(TestFailure):
    """A test process exited with a non-zero status."""

    def __init__(self, *args):
        # args: (path_to_test, exit_status)
        self.path = args[0]
        self.status = args[1]

    def __str__(self):
        return "test %s exited with status %d" % (self.path, self.status)
+
class TestServerFailure(TestFailure):
    """mongod was found dead after a test ran."""

    def __init__(self, *args):
        self.path = args[0]
        # -1 is deliberately not a real exit code, but
        # that's the point.
        self.status = -1

    def __str__(self):
        return 'mongod not running after executing test %s' % self.path
+
def check_db_hashes(master, slave):
    """Compare dbhash output of the `test` database between master and
    slave after replication catches up, recording discrepancies in the
    module-level lost_in_slave / lost_in_master / screwy_in_slave
    structures (reported later by check_and_report_replication_dbhashes).
    """
    # Need to pause a bit so a slave might catch up...
    if not slave.slave:
        raise(Bug("slave instance doesn't have slave attribute set"))

    print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
    master.wait_for_repl()
    print "caught up!"

    # FIXME: maybe make this run dbhash on all databases?
    for mongod in [master, slave]:
        mongod.dbhash = Connection(port=mongod.port, slave_okay=True).test.command("dbhash")
        mongod.dict = mongod.dbhash["collections"]

    global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections

    # Accumulates across calls -- collections from earlier tests stay in
    # the list for later comparisons.
    replicated_collections += master.dict.keys()

    for coll in replicated_collections:
        if coll not in slave.dict and coll not in lost_in_slave:
            lost_in_slave.append(coll)
        # NOTE(review): if coll is absent from either side these lookups
        # raise KeyError (replicated_collections accumulates over multiple
        # calls) -- confirm whether that can happen in practice.
        mhash = master.dict[coll]
        shash = slave.dict[coll]
        if mhash != shash:
            # Hashes differ: capture counts and (best effort) the
            # documents that differ, ordered by _id.
            mTestDB = Connection(port=master.port, slave_okay=True).test
            sTestDB = Connection(port=slave.port, slave_okay=True).test
            mCount = mTestDB[coll].count()
            sCount = sTestDB[coll].count()
            stats = {'hashes': {'master': mhash, 'slave': shash},
                     'counts':{'master': mCount, 'slave': sCount}}
            try:
                mDocs = list(mTestDB[coll].find().sort("_id", 1))
                sDocs = list(sTestDB[coll].find().sort("_id", 1))
                mDiffDocs = list()
                sDiffDocs = list()
                for left, right in izip(mDocs, sDocs):
                    if left != right:
                        mDiffDocs.append(left)
                        sDiffDocs.append(right)

                stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
            except Exception, e:
                stats["error-docs"] = e;

            screwy_in_slave[coll] = stats
            if mhash == "no _id _index":
                # Dump the relevant oplog entries to help diagnose why the
                # collection has no _id index.
                mOplog = mTestDB.connection.local["oplog.$main"];
                oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
                                                          {"op":"c"}]}).sort("$natural", 1))
                print "oplog for %s" % mTestDB[coll].full_name
                for doc in oplog_entries:
                    pprint.pprint(doc, width=200)


    for db in slave.dict.keys():
        if db not in master.dict and db not in lost_in_master:
            lost_in_master.append(db)
+
+
def ternary( b , l="true", r="false" ):
    """Return `l` when `b` is truthy, else `r`.  The defaults produce the
    lowercase strings JavaScript expects for booleans in eval'd code."""
    return l if b else r
+
# Blech.
def skipTest(path):
    """Return True when the test at `path` must be skipped under the
    current global configuration (small_oplog, use_ssl, auth/keyFile/
    use_x509), based on its basename and parent directory."""
    basename = os.path.basename(path)
    parentPath = os.path.dirname(path)
    parentDir = os.path.basename(parentPath)
    if small_oplog: # For tests running in parallel
        if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
                        "connections_opened.js", "opcounters.js", "dbadmin.js"]:
            return True
    if use_ssl:
        # Skip tests using mongobridge since it does not support SSL
        # TODO: Remove when SERVER-10910 has been resolved.
        if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
                        "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
            return True
        # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
        if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
                        "unix_socket1.js"]:
            return True;
    if auth or keyFile or use_x509: # For tests running with auth
        # Skip any tests that run with auth explicitly
        if parentDir.lower() == "auth" or "auth" in basename.lower():
            return True
        if parentPath == mongo_repo: # Skip client tests
            return True
        if parentDir == "tool": # SERVER-6368
            return True
        if parentDir == "dur": # SERVER-7317
            return True
        if parentDir == "disk": # SERVER-7356
            return True

        # Known-incompatible (suite_dir, filename) pairs under auth.
        authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
                           ("jstests", "killop.js"), # SERVER-10128
                           ("sharding", "sync3.js"), # SERVER-6388 for this and those below
                           ("sharding", "sync6.js"),
                           ("sharding", "parallel.js"),
                           ("jstests", "bench_test1.js"),
                           ("jstests", "bench_test2.js"),
                           ("jstests", "bench_test3.js"),
                           ("core", "bench_test1.js"),
                           ("core", "bench_test2.js"),
                           ("core", "bench_test3.js"),
                           ]

        if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
            return True

    return False
+
# Suites whose jstests must always run the shell in "commands" write mode.
forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets"]
# Matches "jstests" followed by one of the suites above, separated by
# either a POSIX or a Windows path separator.
forceCommandsRE = re.compile(r"jstests[/\\](%s)" % '|'.join(forceCommandsForDirs))
def setShellWriteModeForTest(path, argv):
    """Append the --writeMode flag for this test to `argv` (in place).

    Starts from the global shell_write_mode and upgrades "legacy" to
    "commands" when --use-write-commands was given or the test lives in
    one of the forceCommandsForDirs suites.
    """
    swm = shell_write_mode
    if swm == "legacy": # change when the default changes to "commands"
        if use_write_commands or forceCommandsRE.search(path):
            swm = "commands"
    argv += ["--writeMode", swm]
+
def runTest(test, result):
    """Run one test to completion, streaming its output, and fill in
    `result` (a dict of result details like result["url"], exit code,
    and whether mongod was up before/after).

    test is a tuple of ( filename , usedb<bool> )
      filename should be a js file to run
      usedb is true if the test expects a mongod to be running

    Raises TestExitFailure on non-zero exit, TestServerFailure when the
    shared mongod died underneath the test.
    """
    (path, usedb) = test
    (ignore, ext) = os.path.splitext(path)
    test_mongod = mongod()
    mongod_is_up = test_mongod.is_mongod_up(mongod_port)
    result["mongod_running_at_start"] = mongod_is_up;

    # --- Build argv according to the kind of test -------------------------
    if file_of_commands_mode:
        # smoke.py was invoked like "--mode files --from-file foo",
        # so don't try to interpret the test path too much
        if os.sys.platform == "win32":
            argv = [path]
        else:
            argv = shlex.split(path)
        path = argv[0]
        # if the command is a python script, use the script name
        if os.path.basename(path) in ('python', 'python.exe'):
            path = argv[1]
    elif ext == ".js":
        # A jstest: run it through the mongo shell.
        argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]

        setShellWriteModeForTest(path, argv)

        if not usedb:
            argv += ["--nodb"]
        if small_oplog or small_oplog_rs:
            argv += ["--eval", 'testingReplication = true;']
        if use_ssl:
            argv += ["--ssl",
                     "--sslPEMKeyFile", "jstests/libs/client.pem",
                     "--sslCAFile", "jstests/libs/ca.pem",
                     "--sslAllowInvalidCertificates"]
        argv += [path]
    elif ext in ["", ".exe"]:
        # Blech.
        if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
            argv = [path]
            # default data directory for test and perftest is /tmp/unittest
            if smoke_db_prefix:
                dir_name = smoke_db_prefix + '/unittests'
                argv.extend(["--dbpath", dir_name] )
        # more blech
        elif os.path.basename(path) in ['mongos', 'mongos.exe']:
            argv = [path, "--test"]
        else:
            argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
                    "--port", mongod_port]
    else:
        raise Bug("fell off in extension case: %s" % path)

    mongo_test_filename = os.path.basename(path)

    # sys.stdout.write() is more atomic than print, so using it prevents
    # lines being interrupted by, e.g., child processes
    sys.stdout.write(" *******************************************\n")
    sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
    sys.stdout.flush()

    # --- Inject TestData/MongoRunner settings into shell invocations ------
    # FIXME: we don't handle the case where the subprocess
    # hangs... that's bad.
    if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
        evalString = 'TestData = new Object();' + \
                     'TestData.testPath = "' + path + '";' + \
                     'TestData.testFile = "' + os.path.basename( path ) + '";' + \
                     'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
                     'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
                     'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
                     'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
                     'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
                     'TestData.auth = ' + ternary( auth ) + ";" + \
                     'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
                     'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
                     'TestData.authMechanism = ' + ternary( authMechanism,
                                                            '"' + str(authMechanism) + '"', 'null') + ";" + \
                     'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
                     'TestData.useX509 = ' + ternary( use_x509 ) + ";"
        # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
        evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
        evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
        if os.sys.platform == "win32":
            # double quotes in the evalString on windows; this
            # prevents the backslashes from being removed when
            # the shell (i.e. bash) evaluates this string. yuck.
            evalString = evalString.replace('\\', '\\\\')

        if auth and usedb:
            evalString += 'jsTest.authenticate(db.getMongo());'

        argv = argv + [ '--eval', evalString]

    if argv[0].endswith( 'test' ) or argv[0].endswith( 'test.exe' ):
        if no_preallocj :
            argv = argv + [ '--nopreallocj' ]
        if temp_path:
            argv = argv + [ '--tempPath', temp_path ]


    sys.stdout.write(" Command : %s\n" % ' '.join(argv))
    sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
    sys.stdout.flush()

    # --- Launch and stream output ----------------------------------------
    os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
    t1 = time.time()

    # Wrap with buildlogger (no-op unless MONGO_USE_BUILDLOGGER=true);
    # capture stdout so we can both echo and scan it.
    proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
    first_line = proc.stdout.readline() # Get suppressed output URL
    m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
    if m:
        result["url"] = m.group("url")
    sys.stdout.write(first_line)
    sys.stdout.flush()
    while True:
        # print until subprocess's stdout closed.
        # Not using "for line in file" since that has unwanted buffering.
        line = proc.stdout.readline()
        if not line:
            break;

        sys.stdout.write(line)
        sys.stdout.flush()

    proc.wait() # wait if stdout is closed before subprocess exits.
    r = proc.returncode

    t2 = time.time()
    del os.environ['MONGO_TEST_FILENAME']

    timediff = t2 - t1
    # timediff is seconds by default
    scale = 1
    suffix = "seconds"
    # if timediff is less than 10 seconds use ms
    if timediff < 10:
        scale = 1000
        suffix = "ms"
    # if timediff is more than 60 seconds use minutes
    elif timediff > 60:
        scale = 1.0 / 60.0
        suffix = "minutes"
    sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
    sys.stdout.flush()

    result["exit_code"] = r

    # --- Post-run health checks ------------------------------------------
    is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
    if not is_mongod_still_up:
        print "mongod is not running after test"
        result["mongod_running_at_end"] = is_mongod_still_up;
        if start_mongod:
            raise TestServerFailure(path)

    result["mongod_running_at_end"] = is_mongod_still_up;

    if r != 0:
        raise TestExitFailure(path, r)

    print ""
+
def run_tests(tests):
    """Run the given list of (path, usedb) tests, managing the lifetimes
    of the shared master (and, in small-oplog modes, slave) mongods.

    Returns 0 on success, 1 when a test failed without
    --continue-on-failure, 2 when mongod died underneath a test.

    NOTE(review): indentation below is reconstructed from a whitespace-
    mangled patch; confirm block nesting against the upstream file.
    """
    # FIXME: some suites of tests start their own mongod, so don't
    # need this. (So long as there are no conflicts with port,
    # dbpath, etc., and so long as we shut ours down properly,
    # starting this mongod shouldn't break anything, though.)

    # The reason we want to use "with" is so that we get __exit__ semantics
    # but "with" is only supported on Python 2.5+

    if start_mongod:
        master = mongod(small_oplog_rs=small_oplog_rs,
                        small_oplog=small_oplog,
                        no_journal=no_journal,
                        set_parameters=set_parameters,
                        no_preallocj=no_preallocj,
                        auth=auth,
                        authMechanism=authMechanism,
                        keyFile=keyFile,
                        use_ssl=use_ssl,
                        use_x509=use_x509).__enter__()
    else:
        master = Nothing()
    try:
        if small_oplog:
            # Master/slave replication: slave syncs from the master above.
            slave = mongod(slave=True,
                           set_parameters=set_parameters).__enter__()
        elif small_oplog_rs:
            slave = mongod(slave=True,
                           small_oplog_rs=small_oplog_rs,
                           small_oplog=small_oplog,
                           no_journal=no_journal,
                           set_parameters=set_parameters,
                           no_preallocj=no_preallocj,
                           auth=auth,
                           authMechanism=authMechanism,
                           keyFile=keyFile,
                           use_ssl=use_ssl,
                           use_x509=use_x509).__enter__()
            primary = Connection(port=master.port, slave_okay=True);

            # Initiate the two-node replica set, then poll until a
            # primary has been elected.
            primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
                            {'_id': 0, 'host':'localhost:%s' % master.port},
                            {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})

            ismaster = False
            while not ismaster:
                result = primary.admin.command("ismaster");
                ismaster = result["ismaster"]
                time.sleep(1)
        else:
            slave = Nothing()

        try:
            if small_oplog or small_oplog_rs:
                master.wait_for_repl()

            for tests_run, test in enumerate(tests):
                tests_run += 1 # enumerate from 1, python 2.5 compatible
                test_result = { "start": time.time() }

                (test_path, use_db) = test

                # Report test paths relative to the repo when possible.
                if test_path.startswith(mongo_repo + os.path.sep):
                    test_result["test_file"] = test_path[len(mongo_repo)+1:]
                else:
                    # user could specify a file not in repo. leave it alone.
                    test_result["test_file"] = test_path

                try:
                    if skipTest(test_path):
                        test_result["status"] = "skip"

                        print "skipping " + test_path
                    else:
                        # Tentatively record as a failure; popped on success
                        # so an abort leaves the test in `fails`.
                        fails.append(test)
                        runTest(test, test_result)
                        fails.pop()
                        winners.append(test)

                        test_result["status"] = "pass"

                    test_result["end"] = time.time()
                    test_result["elapsed"] = test_result["end"] - test_result["start"]
                    test_report["results"].append( test_result )
                    if small_oplog or small_oplog_rs:
                        master.wait_for_repl()
                        # check the db_hashes
                        if isinstance(slave, mongod):
                            check_db_hashes(master, slave)
                            check_and_report_replication_dbhashes()

                    elif use_db: # reach inside test and see if "usedb" is true
                        if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
                            # Restart mongod periodically to clean accumulated test data
                            # clean_dbroot() is invoked by mongod.start()
                            master.__exit__(None, None, None)
                            master = mongod(small_oplog_rs=small_oplog_rs,
                                            small_oplog=small_oplog,
                                            no_journal=no_journal,
                                            set_parameters=set_parameters,
                                            no_preallocj=no_preallocj,
                                            auth=auth,
                                            authMechanism=authMechanism,
                                            keyFile=keyFile,
                                            use_ssl=use_ssl,
                                            use_x509=use_x509).__enter__()

                except TestFailure, f:
                    test_result["end"] = time.time()
                    test_result["elapsed"] = test_result["end"] - test_result["start"]
                    test_result["error"] = str(f)
                    test_result["status"] = "fail"
                    test_report["results"].append( test_result )
                    try:
                        print f
                        # Record the failing test and re-raise.
                        losers[f.path] = f.status
                        raise f
                    except TestServerFailure, f:
                        return 2
                    except TestFailure, f:
                        if not continue_on_failure:
                            return 1
            if isinstance(slave, mongod):
                check_db_hashes(master, slave)
        finally:
            # Always shut the slave (or Nothing) down.
            slave.__exit__(None, None, None)
    finally:
        # Always shut the master (or Nothing) down.
        master.__exit__(None, None, None)
    return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "All docs matched!"
+ else:
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
def report():
    """Print the end-of-run summary, write the JSON report when
    report_file is set, and raise Exception if anything failed."""
    print "%d tests succeeded" % len(winners)
    num_missed = len(tests) - (len(winners) + len(losers.keys()))
    if num_missed:
        print "%d tests didn't get run" % num_missed
    if losers:
        print "The following tests failed (with exit code):"
        for loser in losers:
            print "%s\t%d" % (loser, losers[loser])

    # Synthesize a pseudo-test entry when replication dbhashes mismatched.
    test_result = { "start": time.time() }
    if check_and_report_replication_dbhashes():
        test_result["end"] = time.time()
        test_result["elapsed"] = test_result["end"] - test_result["start"]
        test_result["test_file"] = "/#dbhash#"
        test_result["error"] = "dbhash mismatch"
        test_result["status"] = "fail"
        test_report["results"].append( test_result )

    if report_file:
        f = open( report_file, "wb" )
        f.write( json.dumps( test_report ) )
        f.close()

    # Raising makes smoke.py exit non-zero so CI notices.
    if losers or lost_in_slave or lost_in_master or screwy_in_slave:
        raise Exception("Test failures")
+
# Keys are the suite names (passed on the command line to smoke.py)
# Values are pairs: (filenames, <start mongod before running tests>)
# Note: "js" and "jsCore" are aliases for the same core/*.js glob.
suiteGlobalConfig = {"js": ("core/*.js", True),
                     "quota": ("quota/*.js", True),
                     "jsPerf": ("perf/*.js", True),
                     "disk": ("disk/*.js", True),
                     "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
                     "noPassthrough": ("noPassthrough/*.js", False),
                     "parallel": ("parallel/*.js", True),
                     "clone": ("clone/*.js", False),
                     "repl": ("repl/*.js", False),
                     "replSets": ("replsets/*.js", False),
                     "dur": ("dur/*.js", False),
                     "auth": ("auth/*.js", False),
                     "sharding": ("sharding/*.js", False),
                     "tool": ("tool/*.js", False),
                     "aggregation": ("aggregation/*.js", True),
                     "multiVersion": ("multiVersion/*.js", True),
                     "failPoint": ("fail_point/*.js", False),
                     "ssl": ("ssl/*.js", True),
                     "sslSpecial": ("sslSpecial/*.js", True),
                     "jsCore": ("core/*.js", True),
                     "gle": ("gle/*.js", True),
                     "slow1": ("slow1/*.js", True),
                     "slow2": ("slow2/*.js", True),
                     }
+
def get_module_suites():
    """Attempts to discover and return information about module test suites

    Returns a dictionary of module suites in the format:

    {
        "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
        ...
    }

    This means the values of this dictionary can be used as "glob"s to match all jstests in the
    suite directory that don't start with an underscore

    The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'

    NOTE: This assumes that if we have more than one module the suite names don't conflict
    """
    modules_directory = 'src/mongo/db/modules'

    # No modules checked out -> no module suites.
    if not os.path.isdir(modules_directory):
        return {}

    test_suites = {}
    for module_directory in os.listdir(modules_directory):
        test_directory = os.path.join(modules_directory, module_directory, "jstests")

        # Modules without a "jstests" directory contribute no suites.
        if not os.path.isdir(test_directory):
            continue

        for test_suite in os.listdir(test_directory):
            test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")

    return test_suites
+
def expand_suites(suites,expandUseDB=True):
    """Takes a list of suites and expands to a list of tests according to a set of rules.

    Keyword arguments:
    suites -- list of suites specified by the user
    expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
                   (default True)

    This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
    "all"), detection of suites in the "modules" directory, and enumerating the test files in a
    given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
    part of the tuple specifies whether the test is run against the database (see --nodb in the
    mongo shell)

    """
    globstr = None
    tests = []
    module_suites = get_module_suites()
    for suite in suites:
        if suite == 'all':
            # Alias: recurse once with the canonical suite list.
            return expand_suites(['test',
                                  'perf',
                                  'jsCore',
                                  'jsPerf',
                                  'noPassthroughWithMongod',
                                  'noPassthrough',
                                  'clone',
                                  'parallel',
                                  'repl',
                                  'auth',
                                  'sharding',
                                  'slow1',
                                  'slow2',
                                  'tool'],
                                 expandUseDB=expandUseDB)
        if suite == 'test':
            # Standalone C++ unit test binary, no mongod needed.
            if os.sys.platform == "win32":
                program = 'test.exe'
            else:
                program = 'test'
            (globstr, usedb) = (program, False)
        elif suite == 'perf':
            if os.sys.platform == "win32":
                program = 'perftest.exe'
            else:
                program = 'perftest'
            (globstr, usedb) = (program, False)
        elif suite == 'mongosTest':
            if os.sys.platform == "win32":
                program = 'mongos.exe'
            else:
                program = 'mongos'
            tests += [(os.path.join(mongo_repo, program), False)]
        elif os.path.exists( suite ):
            # The "suite" is actually a concrete test file path; infer its
            # usedb setting from whichever configured suite glob matches it.
            usedb = True
            for name in suiteGlobalConfig:
                if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
                    usedb = suiteGlobalConfig[name][1]
                    break
            tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
        elif suite in module_suites:
            # Currently we connect to a database in all module tests since there's no mechanism yet
            # to configure it independently
            usedb = True
            paths = glob.glob(module_suites[suite])
            paths.sort()
            tests += [(path, usedb) for path in paths]
        else:
            try:
                globstr, usedb = suiteGlobalConfig[suite]
            except KeyError:
                raise Exception('unknown test suite %s' % suite)

        # NOTE(review): globstr is not reset on each iteration, so branches
        # that don't assign it (e.g. 'mongosTest' or an existing path)
        # re-expand the previous suite's glob here -- confirm intended.
        if globstr:
            if usedb and not expandUseDB:
                # Caller wants the unexpanded suite name for db suites.
                tests += [ (suite,False) ]
            else:
                # jstest globs live under jstests/; binaries at repo root.
                if globstr.endswith('.js'):
                    loc = 'jstests/'
                else:
                    loc = ''
                globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
                globstr = os.path.normpath(globstr)
                paths = glob.glob(globstr)
                paths.sort()
                tests += [(path, usedb) for path in paths]

    return tests
+
def add_exe(e):
    """Append ".exe" to an executable name on Windows (when missing);
    return the name unchanged elsewhere."""
    if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
        return e + ".exe"
    return e
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+ #Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('test', 'test.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+ # some but not all of the required options were sete
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+ if options.ignore_files != None :
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/test/legacy26/buildscripts/utils.py b/test/legacy26/buildscripts/utils.py
new file mode 100644
index 00000000000..68273ee69c8
--- /dev/null
+++ b/test/legacy26/buildscripts/utils.py
@@ -0,0 +1,230 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+ import subprocess
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+ return r;
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+ # name inpsired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
+
diff --git a/test/legacy26/jstests/libs/authTestsKey b/test/legacy26/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/test/legacy26/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/test/legacy26/jstests/libs/ca.pem b/test/legacy26/jstests/libs/ca.pem
new file mode 100644
index 00000000000..f739ef0627b
--- /dev/null
+++ b/test/legacy26/jstests/libs/ca.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w
+DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0
+IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz
+MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI
+DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH
+ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx
+GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27
+nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz
+hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN
+BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM
+hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB
+2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E
+qQ==
+-----END CERTIFICATE-----
diff --git a/test/legacy26/jstests/libs/client.pem b/test/legacy26/jstests/libs/client.pem
new file mode 100644
index 00000000000..85ace4fd40b
--- /dev/null
+++ b/test/legacy26/jstests/libs/client.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 7 (0x7)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 23 14:55:32 2013 GMT
+ Not After : Jan 7 14:55:32 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a:
+ 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1:
+ 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51:
+ 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f:
+ 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79:
+ 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c:
+ 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae:
+ 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd:
+ c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53:
+ a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31:
+ 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35:
+ 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98:
+ be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18:
+ b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe:
+ a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15:
+ 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2:
+ 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4:
+ 6e:a7
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d:
+ f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5:
+ f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db:
+ af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87:
+ 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35:
+ 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa:
+ 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85:
+ 24:18
+-----BEGIN CERTIFICATE-----
+MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0
+NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET
+MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b
+qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM
+zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V
+rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad
+STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B
+MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ
+BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0
+aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw
+FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54
+xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb
+r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh
+9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee
+p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y
+LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j
+mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW
+WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9
+jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+
+flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4
+H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m
+2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4
+tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU
+w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S
+eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/
+vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC
+yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn
+LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s
+9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo
+czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS
+q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop
+59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4
+9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9
+SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn
+X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU
+0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52
+re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT
+F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3
+57rGT6p0OuM8qbrTzpv3JMrm
+-----END PRIVATE KEY-----
diff --git a/test/legacy26/jstests/libs/client_revoked.pem b/test/legacy26/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..276e62644b6
--- /dev/null
+++ b/test/legacy26/jstests/libs/client_revoked.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBDDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MjUzMVoXDTQxMDQyMjE1MjUzMVowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZjbGllbnQwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBALX6DqSWRJBEJJRIRqG5X3cFHzse5jGIdV8fTqikaVitvuhs
+15z1njzfqBQZMJBCEvNb4eaenXJRMBDkEOcbfy6ah+ZLLqGFy7b6OxTROfx++3fT
+gsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN/ufbH2sX451nVd+j6oAz0dTz7RvhAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjciYidtPfd5ILsm7c2yYGV99vwjAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCgs74YrlZ6nivONRO8tNWi+gJ1TcWbQV+5yfF7Ispxo1TFxpa6GTWeZA3X4CwK
+PHmCdhb+oZoi59Qny0KECxtBj6zwdYIKLN0gIFYygaGX5J+YrRVatTjCJUHz9fco
+hZwApLEUkYg2Ldvbg+FncDwiVhi74OW685SkThNIulmPcQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALX6DqSWRJBEJJRI
+RqG5X3cFHzse5jGIdV8fTqikaVitvuhs15z1njzfqBQZMJBCEvNb4eaenXJRMBDk
+EOcbfy6ah+ZLLqGFy7b6OxTROfx++3fTgsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN
+/ufbH2sX451nVd+j6oAz0dTz7RvhAgMBAAECgYEAmHRy+g5uSJLeNmBK1EiSIwtm
+e8hKP+s7scJvyrdbDpEZJG2zQWtA82zIynXECsdgSwOKQQRXkaNU6oG3a3bM19uY
+0CqFRb9EwOLIStp+CM5zLRGmUr73u/+JrBPUWWFJkJvINvTXt18CMnCmosTvygWB
+IBZqsuEXQ6JcejxzQ6UCQQDdVUNdE2JgHp1qrr5l8563dztcrfCxuVFtgsj6qnhd
+UrBAa388B9kn4yVAe2i55xFmtHsO9Bz3ViiDFO163SafAkEA0nq8PeZtcIlZ2c7+
+6/Vdw1uLE5APVG2H9VEZdaVvkwIIXo8WQfMwWo5MQyPjVyBhUGlDwnKa46AcuplJ
+2XMtfwJBAIDrMfKb4Ng13OEP6Yz+yvr4MxZ3plQOqlRMMn53HubUzB6pvpGbzKwE
+DWWyvDxUT/lvtKHwJJMYlz5KyUygVecCQHr50RBNmLW+2muDILiWlOD2lIyqh/pp
+QJ2Zc8mkDkuTTXaKHZQM1byjFXXI+yRFu/Xyeu+abFsAiqiPtXFCdVsCQHai+Ykv
+H3y0mUJmwBVP2fBE3GiTGlaadM0auZKu7/ad+yo7Hv8Kibacwibzrj9PjT3mFSSF
+vujX1oWOaxAMVbE=
+-----END PRIVATE KEY-----
diff --git a/test/legacy26/jstests/libs/cluster-cert.pem b/test/legacy26/jstests/libs/cluster-cert.pem
new file mode 100644
index 00000000000..74dc9845e3d
--- /dev/null
+++ b/test/legacy26/jstests/libs/cluster-cert.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 5 (0x5)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 7 17:19:17 2013 GMT
+ Not After : Dec 22 17:19:17 2040 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=clustertest
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:98:ec:01:6e:f4:ae:8e:16:c8:87:a2:44:86:a0:
+ 45:5c:ca:82:56:ba:0d:a9:60:bf:07:40:da:db:70:
+ 33:a6:c2:ec:9d:e1:f0:da:fe:b9:f9:ac:23:33:64:
+ e6:63:71:cc:a2:0d:eb:86:bc:31:32:aa:30:e6:1d:
+ 5d:6d:fd:45:f4:2f:dc:72:93:bc:92:27:f7:6a:5a:
+ 18:04:f7:64:d0:6a:3c:a9:14:f6:9e:9d:58:26:f4:
+ 16:93:7e:3d:2e:3c:9e:54:41:4d:1a:e1:bd:b4:cf:
+ d0:05:4c:4d:15:fb:5c:70:1e:0c:32:6d:d7:67:5b:
+ ec:b2:61:83:e3:f0:b1:78:aa:30:45:86:f9:6d:f5:
+ 48:1f:f1:90:06:25:db:71:ed:af:d7:0d:65:65:70:
+ 89:d4:c8:c8:23:a0:67:22:de:d9:6e:1d:44:38:cf:
+ 0f:eb:2c:fe:79:01:d7:98:15:5f:22:42:3f:ee:c9:
+ 16:eb:b9:25:08:9a:2a:11:74:47:e0:51:75:8c:ae:
+ eb:8d:b5:30:fe:48:98:0a:9e:ba:6e:a4:60:08:81:
+ c6:05:a0:97:38:70:c0:1f:b4:27:96:8e:c3:d2:c1:
+ 14:5f:34:16:91:7d:ad:4c:e9:23:07:f0:42:86:78:
+ 11:a1:1e:9d:f3:d0:41:09:06:7d:5c:89:ef:d2:0d:
+ 6c:d5
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ C9:00:3A:28:CC:6A:75:57:82:81:00:A6:25:48:6C:CE:0A:A0:4A:59
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ d1:55:e3:5c:43:8c:4f:d3:29:8d:74:4a:1d:23:50:17:27:b3:
+ 30:6f:c6:d7:4c:6c:96:7e:52:a0:2f:91:92:b3:f5:4c:a1:ca:
+ 88:62:31:e4:d6:64:ac:40:17:47:00:24:e8:0d:3b:7b:c7:d4:
+ 7f:3a:76:45:27:fd:9b:ae:9d:44:71:8f:ab:62:60:e5:9d:e8:
+ 59:dd:0e:25:17:14:f8:83:b0:b6:fc:5f:27:8b:69:a2:dc:31:
+ b9:17:a1:27:92:96:c1:73:bf:a3:f0:b8:97:b9:e2:fb:97:6d:
+ 44:01:b0:68:68:47:4b:84:56:3b:19:66:f8:0b:6c:1b:f5:44:
+ a9:ae
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAuCgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgwNzE3
+MTkxN1oXDTQwMTIyMjE3MTkxN1owbzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRQwEgYDVQQDDAtjbHVzdGVydGVzdDCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJjsAW70ro4WyIeiRIagRVzKgla6DalgvwdA
+2ttwM6bC7J3h8Nr+ufmsIzNk5mNxzKIN64a8MTKqMOYdXW39RfQv3HKTvJIn92pa
+GAT3ZNBqPKkU9p6dWCb0FpN+PS48nlRBTRrhvbTP0AVMTRX7XHAeDDJt12db7LJh
+g+PwsXiqMEWG+W31SB/xkAYl23Htr9cNZWVwidTIyCOgZyLe2W4dRDjPD+ss/nkB
+15gVXyJCP+7JFuu5JQiaKhF0R+BRdYyu6421MP5ImAqeum6kYAiBxgWglzhwwB+0
+J5aOw9LBFF80FpF9rUzpIwfwQoZ4EaEenfPQQQkGfVyJ79INbNUCAwEAAaN7MHkw
+CQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2Vy
+dGlmaWNhdGUwHQYDVR0OBBYEFMkAOijManVXgoEApiVIbM4KoEpZMB8GA1UdIwQY
+MBaAFAdBGTqffsW3Ik63vNXf5PwJuGQWMA0GCSqGSIb3DQEBBQUAA4GBANFV41xD
+jE/TKY10Sh0jUBcnszBvxtdMbJZ+UqAvkZKz9UyhyohiMeTWZKxAF0cAJOgNO3vH
+1H86dkUn/ZuunURxj6tiYOWd6FndDiUXFPiDsLb8XyeLaaLcMbkXoSeSlsFzv6Pw
+uJe54vuXbUQBsGhoR0uEVjsZZvgLbBv1RKmu
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCY7AFu9K6OFsiH
+okSGoEVcyoJWug2pYL8HQNrbcDOmwuyd4fDa/rn5rCMzZOZjccyiDeuGvDEyqjDm
+HV1t/UX0L9xyk7ySJ/dqWhgE92TQajypFPaenVgm9BaTfj0uPJ5UQU0a4b20z9AF
+TE0V+1xwHgwybddnW+yyYYPj8LF4qjBFhvlt9Ugf8ZAGJdtx7a/XDWVlcInUyMgj
+oGci3tluHUQ4zw/rLP55AdeYFV8iQj/uyRbruSUImioRdEfgUXWMruuNtTD+SJgK
+nrpupGAIgcYFoJc4cMAftCeWjsPSwRRfNBaRfa1M6SMH8EKGeBGhHp3z0EEJBn1c
+ie/SDWzVAgMBAAECggEAfogRK5Dz+gfqByiCEO7+VagOrtolwbeWeNb2AEpXwq1Z
+Ac5Y76uDkI4ZVkYvx6r6ykBAWOzQvH5MFavIieDeiA0uF/QcPMcrFmnTpBBb74No
+C/OXmGjS7vBa2dHDp8VqsIaT2SFeSgUFt8yJoB2rP+3s47E1YYWTVYoQioO3JQJN
+f0mSuvTnvJO9lbTWiW+yWGVkQvIciCCnHkCEwU0fHht8IoFBGNFlpWZcGiMeietr
+16GdRcmAq95q8TTCeQxkgmmL+0ZJ1BrF7llG2pGYdacawXj1eVRqOHQaFIlcKe05
+RITpuXVYOWBpBpfbQsBZaCGLe7WxHJedrFxdbqm0ZQKBgQDLUQrmIl2wz43t3sI+
+WjW6y1GwMPG9EjXUT1Boq6PNHKgw04/32QNn5IMmz4cp2Mgyz7Hc0ABDU/ZATujd
+yCkxVErPbKRDKSxSl6nLXtLpLbHFmVPfKPbNKIuyFMBsOFOtoFoVbo33wI5dI7aO
+i7sTGB3ngbq4pzCJ9dVt/t81QwKBgQDAjAtBXS8WB69l9w35tx+MgYG0LJ+ykAug
+d91pwiWqSt02fZ0nr/S/76G6B4C8eqeOnYh1RzF5isLD246rLD2Y+uuFrgasvSiS
+4qSKbpG2kk02R/DRTAglAyXI0rhYIDrYKCQPWqNMWpawT/FQQwbFjTuhmz10FyXS
+hmVztZWoBwKBgQCBdnptLibghllGxViEoaai6gJ7Ib9ceHMEXPjDnb+wxPWoGZ8L
+4AjWJ+EHXpAfqmVYTX5hL6VrOdSNAHIxftoUCiuUxwYVqesKMH6y/A9q4WjYfRi1
++fyliJLjc2lPv9IwtfGGwh3uS5ObZTlCrWES+IFaP/YozHUQ9BPSdb+lxwKBgB35
+Lv9b3CqXw6why2EmKpkax/AeSjXnyoeOYT9HY8mgodMLtt0ovPbr/McSx+2PQmon
+B8kJ7h+3hB4tHYZz+prH5MYIky1svNYwxeBu2ewL1k0u4cQTC+mHFeivNNczHTXs
++cASIf2O1IpZx3zxEirKk4/StLxPpimhlkVu7P8dAoGBAJVw2U70+PagVBPtvheu
+ZDEvxSEzrn90ivIh7Y6ZIwdSOSLW04sOVL2JAzO155u4g77jdmcxV3urr1vD9LbF
+qkBGLXx7FFC/Mn/H42qerxr16Bt6RtvVpms71UIQLYxA7caab9cqoyt0wkgqJFKX
+fj0TVODnIf+zPMDCu+frpLbA
+-----END PRIVATE KEY-----
diff --git a/test/legacy26/jstests/libs/command_line/test_parsed_options.js b/test/legacy26/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..e2ca646b63a
--- /dev/null
+++ b/test/legacy26/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,202 @@
+// Merge the two options objects. Used as a helper when we are trying to actually compare options
+// despite the fact that our test framework adds extra stuff to it. Anything set in the second
+// options object overrides the first options object. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/test/legacy26/jstests/libs/config_files/disable_noscripting.ini b/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/test/legacy26/jstests/libs/config_files/enable_auth.json b/test/legacy26/jstests/libs/config_files/enable_auth.json
new file mode 100644
index 00000000000..9f9cc84d107
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_auth.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "authorization" : "enabled"
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_autosplit.json b/test/legacy26/jstests/libs/config_files/enable_autosplit.json
new file mode 100644
index 00000000000..a0d4f8af1be
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_autosplit.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "autoSplit" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_httpinterface.json b/test/legacy26/jstests/libs/config_files/enable_httpinterface.json
new file mode 100644
index 00000000000..c87dabe125d
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_httpinterface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json b/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json
new file mode 100644
index 00000000000..362db08edd3
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "indexBuildRetry" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_journal.json b/test/legacy26/jstests/libs/config_files/enable_journal.json
new file mode 100644
index 00000000000..d75b94ccbc7
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_journal.json
@@ -0,0 +1,7 @@
+{
+ "storage" : {
+ "journal" : {
+ "enabled" : false
+ }
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_objcheck.json b/test/legacy26/jstests/libs/config_files/enable_objcheck.json
new file mode 100644
index 00000000000..b52be7382ed
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_objcheck.json
@@ -0,0 +1,5 @@
+{
+ "net" : {
+ "wireObjectCheck" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_paranoia.json b/test/legacy26/jstests/libs/config_files/enable_paranoia.json
new file mode 100644
index 00000000000..218646b1662
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_paranoia.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "archiveMovedChunks" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_prealloc.json b/test/legacy26/jstests/libs/config_files/enable_prealloc.json
new file mode 100644
index 00000000000..15ecefbb546
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_prealloc.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "preallocDataFiles" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_scripting.json b/test/legacy26/jstests/libs/config_files/enable_scripting.json
new file mode 100644
index 00000000000..e8f32f2c23c
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_scripting.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "javascriptEnabled" : true
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/enable_unixsocket.json b/test/legacy26/jstests/libs/config_files/enable_unixsocket.json
new file mode 100644
index 00000000000..660d21eb17f
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/enable_unixsocket.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "unixDomainSocket" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/set_profiling.json b/test/legacy26/jstests/libs/config_files/set_profiling.json
new file mode 100644
index 00000000000..944f0de1575
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/set_profiling.json
@@ -0,0 +1,5 @@
+{
+ "operationProfiling" : {
+ "mode" : "all"
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/set_replsetname.json b/test/legacy26/jstests/libs/config_files/set_replsetname.json
new file mode 100644
index 00000000000..522ca2b766f
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/set_replsetname.json
@@ -0,0 +1,5 @@
+{
+ "replication" : {
+ "replSetName" : "myconfigname"
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/set_shardingrole.json b/test/legacy26/jstests/libs/config_files/set_shardingrole.json
new file mode 100644
index 00000000000..71f92f122db
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/set_shardingrole.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "clusterRole" : "configsvr"
+ }
+}
diff --git a/test/legacy26/jstests/libs/config_files/set_verbosity.json b/test/legacy26/jstests/libs/config_files/set_verbosity.json
new file mode 100644
index 00000000000..47a1cce1b03
--- /dev/null
+++ b/test/legacy26/jstests/libs/config_files/set_verbosity.json
@@ -0,0 +1,5 @@
+{
+ "systemLog" : {
+ "verbosity" : 5
+ }
+}
diff --git a/test/legacy26/jstests/libs/crl.pem b/test/legacy26/jstests/libs/crl.pem
new file mode 100644
index 00000000000..dce0a0fb3f1
--- /dev/null
+++ b/test/legacy26/jstests/libs/crl.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQ3NDFaFw00
+MDA0MjgxODQ3NDFaoA4wDDAKBgNVHRQEAwIBCzANBgkqhkiG9w0BAQUFAAOBgQAu
+PlPDGei2q6kdkoHe8vmDuts7Hm/o9LFbBmn0XUcfHisCJCPsJTyGCsgnfIiBcXJY
+1LMKsQFnYGv28rE2ZPpFg2qNxL+6qUEzCvqaHLX9q1V0F+f8hHDxucNYu52oo/h0
+uNZxB1KPFI2PReG5d3oUYqJ2+EctKkrGtxSPzbN0gg==
+-----END X509 CRL-----
diff --git a/test/legacy26/jstests/libs/crl_client_revoked.pem b/test/legacy26/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..85eeaff5543
--- /dev/null
+++ b/test/legacy26/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBujCCASMCAQEwDQYJKoZIhvcNAQEFBQAwgZIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwF
+MTBHZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3Jp
+dHkxGzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1cxcNMTMxMjA2MTUzMzUwWhcN
+MTQwMTA1MTUzMzUwWjBMMBICAQwXDTEzMTIwNjE1MjczMFowGgIJAJGUg/wuW1KD
+Fw0xMjEyMTIxODQ4MjJaMBoCCQCRlIP8LltShRcNMTIxMjEyMTg0ODUyWqAOMAww
+CgYDVR0UBAMCAQ4wDQYJKoZIhvcNAQEFBQADgYEAERPfPdQnIafo1lYbFEx2ojrb
+eYqvWN9ykTyUGq2bKv+STYiuaKUz6daGVjELjn/safn5wHkYr9+C/kRRoCor5HYw
+N3uxHnkMpl6Xn7kgXL2b0jbdvfa44faOXdH2gbhzd8bFsOMra4QJHT6CgpYb3ei1
++ePhAd1KS7tS/dyyP4c=
+-----END X509 CRL-----
diff --git a/test/legacy26/jstests/libs/crl_expired.pem b/test/legacy26/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..88307503240
--- /dev/null
+++ b/test/legacy26/jstests/libs/crl_expired.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQwNTBaFw0x
+MzAxMTExODQwNTBaoA4wDDAKBgNVHRQEAwIBAzANBgkqhkiG9w0BAQUFAAOBgQBs
+jyvEdX8o0+PfRJsEv5oLwgp5y+YmKjRlXg2oj/ETxBDKNYtBY7B9Uu9q0chFtwTu
+XMXeEFWuxnKG+4Ovp6JmNcCKkttUwsWQuR6dGpClW6ttTk0putAWtDnqukTPlEQ2
+XU3wco7ZgrTphvuGpaIQLM1sQg9x8SfW3q6/hxYm3A==
+-----END X509 CRL-----
diff --git a/test/legacy26/jstests/libs/dur_checksum_bad_first.journal b/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/test/legacy26/jstests/libs/dur_checksum_bad_last.journal b/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/test/legacy26/jstests/libs/dur_checksum_good.journal b/test/legacy26/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/test/legacy26/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/test/legacy26/jstests/libs/fts.js b/test/legacy26/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/test/legacy26/jstests/libs/fts.js
@@ -0,0 +1,18 @@
+
+function queryIDS( coll, search, filter, extra ){
+ var cmd = { search : search }
+ if ( filter )
+ cmd.filter = filter;
+ if ( extra )
+ Object.extend( cmd, extra );
+ lastCommadResult = coll.runCommand( "text" , cmd);
+
+ return getIDS( lastCommadResult );
+}
+
+function getIDS( commandResult ){
+ if ( ! ( commandResult && commandResult.results ) )
+ return []
+
+ return commandResult.results.map( function(z){ return z.obj._id; } )
+}
diff --git a/test/legacy26/jstests/libs/fun.js b/test/legacy26/jstests/libs/fun.js
new file mode 100644
index 00000000000..276f32a8f40
--- /dev/null
+++ b/test/legacy26/jstests/libs/fun.js
@@ -0,0 +1,32 @@
+// General high-order functions
+
+function forEach (action, array) {
+ for (var i = 0; i < array.length; i++)
+ action (array[i]);
+}
+
+function foldl (combine, base, array) {
+ for (var i = 0; i < array.length; i++)
+ base = combine (base, array[i]);
+ return base
+}
+
+function foldr (combine, base, array) {
+ for (var i = array.length - 1; i >= 0; i--)
+ base = combine (array[i], base);
+ return base
+}
+
+function map (func, array) {
+ var result = [];
+ for (var i = 0; i < array.length; i++)
+ result.push (func (array[i]));
+ return result
+}
+
+function filter (pred, array) {
+ var result = []
+ for (var i = 0; i < array.length; i++)
+ if (pred (array[i])) result.push (array[i]);
+ return result
+}
diff --git a/test/legacy26/jstests/libs/geo_near_random.js b/test/legacy26/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..60cb7733f5d
--- /dev/null
+++ b/test/legacy26/jstests/libs/geo_near_random.js
@@ -0,0 +1,99 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPoints already called");
+ this.nPts = nPts;
+
+ for (var i=0; i<nPts; i++){
+ this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ }
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPoints not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
diff --git a/test/legacy26/jstests/libs/grid.js b/test/legacy26/jstests/libs/grid.js
new file mode 100644
index 00000000000..3a1253d83cd
--- /dev/null
+++ b/test/legacy26/jstests/libs/grid.js
@@ -0,0 +1,171 @@
+// Grid infrastructure: Servers, ReplicaSets, ConfigSets, Shards, Routers (mongos). Convenient objects and functions on top of those in shell/servers.js -Tony
+
+load('jstests/libs/fun.js')
+load('jstests/libs/network.js')
+
+// New servers and routers take and increment port number from this.
+// A comment containing FreshPorts monad implies reading and incrementing this, IO may also read/increment this.
+var nextPort = 31000
+
+/*** Server is the spec of a mongod, ie. all its command line options.
+ To start a server call 'begin' ***/
+// new Server :: String -> FreshPorts Server
+function Server (name) {
+ this.addr = '127.0.0.1';
+ this.dirname = name + nextPort;
+ this.args = { port : nextPort++,
+ noprealloc : '',
+ smallfiles : '',
+ rest : '',
+ oplogSize : 8 }
+}
+
+// Server -> String <addr:port>
+Server.prototype.host = function() {
+ return this.addr + ':' + this.args.port
+}
+
+// Start a new server with this spec and return connection to it
+// Server -> IO Connection
+Server.prototype.begin = function() {
+ return startMongodTest(this.args.port, this.dirname, false, this.args);
+}
+
+// Stop server and remove db directory
+// Server -> IO ()
+Server.prototype.end = function() {
+ print('Stopping mongod on port ' + this.args.port)
+ stopMongod (this.args.port)
+ resetDbpath (MongoRunner.dataPath + this.dirname)
+}
+
+// Cut server from network so it is unreachable (but still alive)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function cutServer (conn) {
+ var addrport = parseHost (conn.host)
+ cutNetwork (addrport.port)
+}
+
+// Ensure server is connected to network (undo cutServer)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function uncutServer (conn) {
+ var iport = parseHost (conn.host)
+ restoreNetwork (iport.port)
+}
+
+// Kill server process at other end of this connection
+function killServer (conn, _signal) {
+ var signal = _signal || 15
+ var iport = parseHost (conn.host)
+ stopMongod (iport.port, signal)
+}
+
+/*** ReplicaSet is the spec of a replica set, ie. options given to ReplicaSetTest.
+ To start a replica set call 'begin' ***/
+// new ReplicaSet :: String -> Int -> FreshPorts ReplicaSet
+function ReplicaSet (name, numServers) {
+ this.name = name
+ this.host = '127.0.0.1'
+ this.nodes = numServers
+ this.startPort = nextPort
+ this.oplogSize = 40
+ nextPort += numServers
+}
+
+// Start a replica set with this spec and return ReplSetTest, which hold connections to the servers including the master server. Call ReplicaSetTest.stopSet() to end all servers
+// ReplicaSet -> IO ReplicaSetTest
+ReplicaSet.prototype.begin = function() {
+ var rs = new ReplSetTest(this)
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+// Create a new server and add it to replica set
+// ReplicaSetTest -> IO Connection
+ReplSetTest.prototype.addServer = function() {
+ var conn = this.add()
+ nextPort++
+ this.reInitiate()
+ this.awaitReplication(60000)
+ assert.soon(function() {
+ var doc = conn.getDB('admin').isMaster()
+ return doc['ismaster'] || doc['secondary']
+ })
+ return conn
+}
+
+/*** ConfigSet is a set of specs (Servers) for sharding config servers.
+ Supply either the servers or the number of servers desired.
+ To start the config servers call 'begin' ***/
+// new ConfigSet :: [Server] or Int -> FreshPorts ConfigSet
+function ConfigSet (configSvrsOrNumSvrs) {
+ if (typeof configSvrsOrNumSvrs == 'number') {
+ this.configSvrs = []
+ for (var i = 0; i < configSvrsOrNumSvrs; i++)
+ this.configSvrs.push (new Server ('config'))
+ } else
+ this.configSvrs = configSvrs
+}
+
+// Start config servers, return list of connections to them
+// ConfigSet -> IO [Connection]
+ConfigSet.prototype.begin = function() {
+ return map (function(s) {return s.begin()}, this.configSvrs)
+}
+
+// Stop config servers
+// ConfigSet -> IO ()
+ConfigSet.prototype.end = function() {
+ return map (function(s) {return s.end()}, this.configSvrs)
+}
+
+/*** Router is the spec for a mongos, ie, its command line options.
+ To start a router (mongos) call 'begin' ***/
+// new Router :: ConfigSet -> FreshPorts Router
+function Router (configSet) {
+ this.args = { port : nextPort++,
+ v : 0,
+ configdb : map (function(s) {return s.host()}, configSet.configSvrs) .join(','),
+ chunkSize : 1}
+}
+
+// Start router (mongos) with this spec and return connection to it.
+// Router -> IO Connection
+Router.prototype.begin = function() {
+ return startMongos (this.args);
+}
+
+// Stop router
+// Router -> IO ()
+Router.prototype.end = function() {
+ return stopMongoProgram (this.args.port)
+}
+
+// Add shard to config via router (mongos) connection. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> IO ()
+function addShard (routerConn, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({addshard: repSetOrHostName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> IO ()
+function enableSharding (routerConn, dbName) {
+ var ack = routerConn.getDB('admin').runCommand ({enablesharding: dbName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> String -> String -> IO ()
+function shardCollection (routerConn, dbName, collName, shardKey) {
+ var ack = routerConn.getDB('admin').runCommand ({shardcollection: dbName + '.' + collName, key: shardKey})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Move db from its current primary shard to given shard. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> String -> IO ()
+function moveDB (routerConn, dbname, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({moveprimary: dbname, to: repSetOrHostName})
+ printjson(ack)
+ assert (ack['ok'], tojson(ack))
+}
diff --git a/test/legacy26/jstests/libs/key1 b/test/legacy26/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/test/legacy26/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/test/legacy26/jstests/libs/key2 b/test/legacy26/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/test/legacy26/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/test/legacy26/jstests/libs/localhostnameCN.pem b/test/legacy26/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e181139b5d9
--- /dev/null
+++ b/test/legacy26/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 8 (0x8)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:31:58 2013 GMT
+ Not After : Mar 23 14:31:58 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=127.0.0.1
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:aa:e1:a0:6c:09:dc:fd:d0:9f:0f:b6:77:40:60:
+ f9:01:f9:9e:55:20:fe:88:04:93:c9:ab:96:93:3a:
+ ed:7e:7d:ad:e4:eb:a7:e9:07:35:ef:6e:14:64:dd:
+ 31:9b:e5:24:06:18:bb:60:67:e3:c5:49:8e:79:b6:
+ 78:07:c1:64:3f:de:c1:7d:1b:a9:96:35:d5:f9:b8:
+ b4:5e:2a:34:b7:d0:19:ad:f6:8a:00:ef:8e:b0:d5:
+ 36:1f:66:a0:7a:7d:cf:f0:98:3c:ee:0f:be:67:d2:
+ de:c3:e6:b8:79:2f:64:40:0c:39:15:97:8c:13:da:
+ 1b:db:5c:bb:a3:43:0b:74:c7:46:55:9b:ea:d7:93:
+ d5:15:2f:d1:34:ac:a9:99:3b:01:f0:c1:d7:42:89:
+ 24:bb:ab:60:99:c1:4d:9f:bf:9a:a3:92:3a:58:05:
+ e2:47:a6:8e:71:b2:0a:32:b0:c5:cc:a0:58:40:bf:
+ 09:a7:76:f5:37:ce:90:71:e0:75:89:17:ea:fb:80:
+ 24:a1:9d:6e:1b:7e:e3:44:52:d3:fe:e3:de:80:9a:
+ 8e:c3:4f:8c:bb:b4:8c:d2:a9:a9:aa:af:90:ac:b4:
+ ee:6b:d2:c5:71:1e:08:7f:4c:b6:2a:5f:13:7a:e3:
+ 29:f7:2e:bb:f7:c5:48:0a:4e:2e:1e:d4:2c:40:b3:
+ 4c:19
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 0E:3F:54:C4:77:85:FF:93:58:A7:24:23:32:35:73:B0:BE:8C:C3:BB
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 4c:9d:31:81:b5:e9:6a:64:4c:1e:eb:91:7f:f1:66:74:46:13:
+ 19:cb:f2:3b:9a:41:f2:83:67:32:53:a6:cd:33:37:4c:92:a6:
+ 36:d4:f3:0b:56:a2:2b:66:f1:09:a7:06:36:b8:83:b7:31:70:
+ fe:bf:af:b5:3d:59:f3:f2:18:48:c7:6c:b0:90:8c:24:47:30:
+ 53:8d:c5:3e:7c:7b:33:53:15:ec:bd:8a:83:ed:05:e8:8b:21:
+ d7:65:39:69:95:c8:58:7d:4f:1b:32:51:85:2d:4d:8b:be:00:
+ 60:17:83:9b:2b:13:43:05:78:db:a4:2e:a2:cb:31:34:7e:b9:
+ 8a:72
+-----BEGIN CERTIFICATE-----
+MIIDZDCCAs2gAwIBAgIBCDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+MzE1OFoXDTQxMDMyMzE0MzE1OFowXDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjES
+MBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAquGgbAnc/dCfD7Z3QGD5AfmeVSD+iASTyauWkzrtfn2t5Oun6Qc1724UZN0x
+m+UkBhi7YGfjxUmOebZ4B8FkP97BfRupljXV+bi0Xio0t9AZrfaKAO+OsNU2H2ag
+en3P8Jg87g++Z9Lew+a4eS9kQAw5FZeME9ob21y7o0MLdMdGVZvq15PVFS/RNKyp
+mTsB8MHXQokku6tgmcFNn7+ao5I6WAXiR6aOcbIKMrDFzKBYQL8Jp3b1N86QceB1
+iRfq+4AkoZ1uG37jRFLT/uPegJqOw0+Mu7SM0qmpqq+QrLTua9LFcR4If0y2Kl8T
+euMp9y6798VICk4uHtQsQLNMGQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU
+Dj9UxHeF/5NYpyQjMjVzsL6Mw7swHwYDVR0jBBgwFoAUB0EZOp9+xbciTre81d/k
+/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEATJ0xgbXpamRMHuuRf/FmdEYTGcvyO5pB
+8oNnMlOmzTM3TJKmNtTzC1aiK2bxCacGNriDtzFw/r+vtT1Z8/IYSMdssJCMJEcw
+U43FPnx7M1MV7L2Kg+0F6Ish12U5aZXIWH1PGzJRhS1Ni74AYBeDmysTQwV426Qu
+ossxNH65inI=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCq4aBsCdz90J8P
+tndAYPkB+Z5VIP6IBJPJq5aTOu1+fa3k66fpBzXvbhRk3TGb5SQGGLtgZ+PFSY55
+tngHwWQ/3sF9G6mWNdX5uLReKjS30Bmt9ooA746w1TYfZqB6fc/wmDzuD75n0t7D
+5rh5L2RADDkVl4wT2hvbXLujQwt0x0ZVm+rXk9UVL9E0rKmZOwHwwddCiSS7q2CZ
+wU2fv5qjkjpYBeJHpo5xsgoysMXMoFhAvwmndvU3zpBx4HWJF+r7gCShnW4bfuNE
+UtP+496Amo7DT4y7tIzSqamqr5CstO5r0sVxHgh/TLYqXxN64yn3Lrv3xUgKTi4e
+1CxAs0wZAgMBAAECggEADtdh04BXzUOdTQQP/2tstRs1ATfIY4/iNhXNEiSAFAhe
+Xg+Jmdeie5UX+FqtwFh6dH0ZaRoc0jm9Qhzy99l4F4QFUhRg+kbausGsCLGpun08
+fbt36PTlc75Q4RFMxta+hKr0P8jmRKYv6tvTEdNn5ZgqLRHofKDo4nh/Y4KjMBUq
+VIMUu+VO9Ol2GPlZVRBaJec0E1+HUyzaK5JVUIFh4atcrHyXxae+rY9o6G57BBEj
+ZzlahfMI5aYj9HhXnB8RuhVBuIZBNSA41nxHmOs6JBQsatVML51RFIV4KPU+AyDR
+bdYXHJehRIUF8RL92aHjGYsvXdSxVhuUBqMIQhOwAQKBgQDUtj+p+7SHpLyQIZpU
+EQFK+42LDc6zF4uJVjq1d8fC2Hrmz8PLs0KcH36VWNbo48B3iFiPWIMID5xwLuIb
+FkLOzJ8QrbILn0zcu/hplrCiy6PZas3rpLJ+X406wLQeCikOLhQkz+cuKuQmvWkK
+eyqwBIIxg8t5dTtTAmu3w/DDgQKBgQDNqByxKduTgEND1+isUOt+L/ipR3SzXQ4m
+ZsOKiSxyXxge0/CUxPxO6WeEVGQ7bGAr5yQD9ukvJnCo3phYcuRRj+RTMrTL73Kz
+p/cyOUx2NMUIgURTsO+s3D0lC4+NmoDge0roeEDX+/lFNjqgRKJ+1LUimqbo5uNE
+EupkyTh0mQKBgGw/81ZGSjFdnLic4TU3Ejlem0HQ3Qg3S0OxJl+DfZ2jHaiowzO/
+Hn7laD4I4BXVEfXC5Y7NtKE9kJdmxJqUUZt8dta+DoXro+oRnvHdRjcS+2eB+xmY
+z12QswbbWs6OzSXyPT4er7/HBCTS78nttGOvZ7JbKAm/p1kvOjJi/PwBAoGAE7Tw
+Sum/6Lp5t56Q5TI73rOqGE6ImEdqe7ONOVE7uRnzrcCRZTAbHVSwXrXXhPo1nP9h
+LCAU6De+w+/QmWkpB8fKEU7ilEg1rZGC1oU3FnyoBNCeQ4bI8L+J/GrHLsKHZvtp
+ii07yXaTxFYV+BWbnJu1X8OCCv9U98j4PQArMMECgYEAm6uLN647vb+ZhzNBMtsX
+1wnMSgzbgGpgjhWwk6dNmw8YJNKg9CFa8sQ8N7yKXWBEF/RkU0kfzZL8iddHEb/k
+Ti1BlwrEzFfIQLlBfv47tYWOj8ZxN0ujlzUoN2VAC25LZhjcQCo3ftBk2lkrmllu
+MxjxBfRk/teUdRl80oi5R0w=
+-----END PRIVATE KEY-----
diff --git a/test/legacy26/jstests/libs/localhostnameSAN.pem b/test/legacy26/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..beb0bb91b61
--- /dev/null
+++ b/test/legacy26/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,100 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 9 (0x9)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:45:13 2013 GMT
+ Not After : Mar 23 14:45:13 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=santesthostname.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:c9:83:7a:75:42:cf:35:a4:95:c7:c8:d8:4d:19:
+ 0e:89:87:d5:bd:f9:2f:ee:20:2c:4c:ca:6d:0b:c1:
+ 10:5b:06:1b:c4:a1:26:12:25:06:7a:1e:d1:e6:d0:
+ 91:2b:a3:c8:74:de:95:10:d9:ff:20:03:ec:84:db:
+ 49:d9:a4:e9:c2:93:f0:d2:32:01:a6:55:db:14:bf:
+ 16:fe:88:e0:e4:46:0f:6a:bd:27:95:45:2e:8d:13:
+ e2:99:09:74:e4:2b:32:c3:6d:61:0c:86:85:eb:12:
+ f5:dc:9e:7b:d3:00:a3:ce:f4:8a:4b:51:7f:a2:c6:
+ 0b:52:a4:f1:41:d5:01:53:88:99:b9:3b:29:f8:43:
+ 5e:a4:c7:41:d9:d3:34:43:f2:c7:a6:8d:22:1c:f9:
+ b2:63:cb:df:83:9c:6f:ec:e3:b0:63:af:0b:51:c9:
+ 20:ca:c2:59:c1:2c:ec:de:37:18:76:3d:73:85:82:
+ 12:11:cd:b6:ef:2f:7b:64:cd:a3:2d:f6:7a:54:7f:
+ b3:4f:c9:38:f4:62:b6:da:00:f0:59:df:e1:d3:15:
+ ca:4b:73:6c:22:c1:9a:c1:51:c4:28:59:0f:71:2a:
+ 39:e9:17:08:9d:b0:88:61:a7:53:67:da:dc:fb:6e:
+ 38:f7:a8:cd:cd:88:ed:d9:4c:88:f4:a4:75:5e:3f:
+ 8b:ff
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ OpenSSL Certificate for SSL Server
+ X509v3 Subject Alternative Name:
+ DNS:*.example.com, DNS:127.0.0.1, DNS:morefun!, IP Address:154.2.2.3, email:user@host.com
+ Signature Algorithm: sha1WithRSAEncryption
+ 0b:82:c6:7d:e0:ba:71:24:d6:a8:f4:cb:6f:0f:f6:69:28:32:
+ 98:81:e6:14:49:81:07:ff:92:dd:0a:a4:68:3c:92:00:e5:8c:
+ 43:d1:29:04:4a:5e:f2:b1:db:d2:ca:5d:7d:fc:fe:7b:f5:01:
+ 65:87:25:cd:4c:68:09:16:bd:c7:b0:a4:d2:89:5e:dd:92:44:
+ 6c:6e:7a:fe:7e:05:e2:2b:56:96:96:16:44:4a:01:87:8f:0c:
+ df:35:88:97:3e:e5:21:23:a2:af:87:ad:ee:f7:9e:05:36:f7:
+ 96:88:c8:fa:92:33:c2:60:2e:14:d9:ea:34:ab:04:a6:78:04:
+ be:da
+-----BEGIN CERTIFICATE-----
+MIIDjDCCAvWgAwIBAgIBCTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+NDUxM1oXDTQxMDMyMzE0NDUxM1owZjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEc
+MBoGA1UEAwwTc2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMmDenVCzzWklcfI2E0ZDomH1b35L+4gLEzKbQvBEFsGG8Sh
+JhIlBnoe0ebQkSujyHTelRDZ/yAD7ITbSdmk6cKT8NIyAaZV2xS/Fv6I4ORGD2q9
+J5VFLo0T4pkJdOQrMsNtYQyGhesS9dyee9MAo870iktRf6LGC1Kk8UHVAVOImbk7
+KfhDXqTHQdnTNEPyx6aNIhz5smPL34Ocb+zjsGOvC1HJIMrCWcEs7N43GHY9c4WC
+EhHNtu8ve2TNoy32elR/s0/JOPRittoA8Fnf4dMVyktzbCLBmsFRxChZD3EqOekX
+CJ2wiGGnU2fa3PtuOPeozc2I7dlMiPSkdV4/i/8CAwEAAaOBmDCBlTAJBgNVHRME
+AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiT3BlblNTTCBD
+ZXJ0aWZpY2F0ZSBmb3IgU1NMIFNlcnZlcjBCBgNVHREEOzA5gg0qLmV4YW1wbGUu
+Y29tggkxMjcuMC4wLjGCCG1vcmVmdW4hhwSaAgIDgQ11c2VyQGhvc3QuY29tMA0G
+CSqGSIb3DQEBBQUAA4GBAAuCxn3gunEk1qj0y28P9mkoMpiB5hRJgQf/kt0KpGg8
+kgDljEPRKQRKXvKx29LKXX38/nv1AWWHJc1MaAkWvcewpNKJXt2SRGxuev5+BeIr
+VpaWFkRKAYePDN81iJc+5SEjoq+Hre73ngU295aIyPqSM8JgLhTZ6jSrBKZ4BL7a
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJg3p1Qs81pJXH
+yNhNGQ6Jh9W9+S/uICxMym0LwRBbBhvEoSYSJQZ6HtHm0JEro8h03pUQ2f8gA+yE
+20nZpOnCk/DSMgGmVdsUvxb+iODkRg9qvSeVRS6NE+KZCXTkKzLDbWEMhoXrEvXc
+nnvTAKPO9IpLUX+ixgtSpPFB1QFTiJm5Oyn4Q16kx0HZ0zRD8semjSIc+bJjy9+D
+nG/s47BjrwtRySDKwlnBLOzeNxh2PXOFghIRzbbvL3tkzaMt9npUf7NPyTj0Yrba
+APBZ3+HTFcpLc2wiwZrBUcQoWQ9xKjnpFwidsIhhp1Nn2tz7bjj3qM3NiO3ZTIj0
+pHVeP4v/AgMBAAECggEAbaQ12ttQ9rToMd2bosdBW58mssiERaIHuHhjQIP5LC10
+qlWr6y9uCMAAIP/WHNJuXPhGTvbtkzPPWrIdymeqMI5h91vx/di07OLT1gYPpuRf
+uwnUIamUnHn3TqEQkpzWb/JxXWlMMA0O7MzmPnYYqp/vJu/e7Geo/Xx1MAZ/RD0U
+YUvrjAyHcor01VVa/eV69jL+6x9ExFNmRYRbmjmK/f10R4o86nIfqhXbM8qKsT6x
+1U/S2I4oModm0x12PgiMDMDzVD+cNE/h8lSnFtBTNEY3xRe7CZnhMV4nBVGjWi9D
+XjcIBA0kGd4G10ploiF+37J/PQbyodLA/Y30BIYCkQKBgQD6XvEzd4DbBa08pcCa
+CYZd5pyAHur1GzJ4rTQNqB84hzuyG6dKkk0rPXjExrj/GAtGWg2ohggmC5OPInKM
+WdpMC56Q0aZYMId3Be/Wg4kRgFO0YOsrx0dRVi5nwbRXkMjXbfewSopwbzP5hIo1
+7rfOhdhbjXx6W269FPE4Epmj1QKBgQDOC1QjGeEzwEgSq3LuojRLHFo31pWYr7UU
+sxhpoWMB6ImPMVjXaEsRKfc7Gulpee1KVQLVmzbkqrHArVNXEpuG4egRwZ10UJ0L
+v4PqrElyHKxgAvllflkkMSX4rx791T+AZMq6W5VX1fKiojfvSLzmEFaI6VmS43GZ
+KCz9RFbegwKBgHSE4vP01b8YsTrcWPpXHHVu8b6epPJVKfQHh4YjjAQey6VkQULv
+O4K4JRBO+6GcawLeviSD3B74nD+s5Gp1Fqb1cWIsb6HzU9gMp0XKCWxfsJTt1gSV
+xZcQ6J/ZAjkOZKn9v5wH1M3msuWYzUm0Q06V888H1bqL+sl8iZZy8ZXRAoGBALf6
+GZh2BUYGTNSOzkMSBouCt3PgYRdC3PesqwG2nwcXMazwLRm6AD1FMYJPF1edDSow
+GiXNQAiR+cHHggDflourr2IbdZJkYLYavZmPWM1RmQDp5vKfDM1qLTOOeqe//8GP
+Pg2EtScG3G4nVraMRk9PC1WYtuiXudk9rF5A5SgtAoGBAL1oVSnQpi5tzBNJqhzM
+mQIF7ct5WNj2b1lKqqsXUTd2pcgMCRrryatqH+gLz1rAjtbVfx2FAYkutH5TFgqP
+c4uomUH3so1EjEA8GtFS9SSkLn5nIr4TnVy4+Qsr1svOo8mhtztORXz+xOTxR6ud
+p7rd/YEbc5GhNSXlcW+apZW+
+-----END PRIVATE KEY-----
diff --git a/test/legacy26/jstests/libs/mockkrb5.conf b/test/legacy26/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/test/legacy26/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/test/legacy26/jstests/libs/mockservice.keytab b/test/legacy26/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/test/legacy26/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/test/legacy26/jstests/libs/mockuser.keytab b/test/legacy26/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/test/legacy26/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/test/legacy26/jstests/libs/network.js b/test/legacy26/jstests/libs/network.js
new file mode 100644
index 00000000000..e5b33f3219e
--- /dev/null
+++ b/test/legacy26/jstests/libs/network.js
@@ -0,0 +1,37 @@
+
+// Parse "127.0.0.1:300" into {addr: "127.0.0.1", port: 300},
+// and "127.0.0.1" into {addr: "127.0.0.1", port: undefined}
+function parseHost (hostString) {
+ var items = hostString.match(/(\d+.\d+.\d+.\d+)(:(\d+))?/)
+ return {addr: items[1], port: parseInt(items[3])}
+}
+
+
+/* Network traffic shaping (packet dropping) to simulate network problems
+ Currently works on BSD Unix and Mac OS X only (using ipfw).
+ Requires sudo access.
+ TODO: make it work on Linux too (using iptables). */
+
+var nextRuleNum = 100 // this grows indefinitely but can't exceed 65534, so can't call routines below indefinitely
+var portRuleNum = {}
+
+// Cut network connection to local port by dropping packets using ipfw
+function cutNetwork (port) {
+ portRuleNum[port] = nextRuleNum
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any to any ' + port)
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any ' + port + ' to any')
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
+
+// Restore network connection to local port by not dropping packets using ipfw
+function restoreNetwork (port) {
+ var ruleNum = portRuleNum[port]
+ if (ruleNum) {
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum++)
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum)
+ delete portRuleNum[port]
+ }
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
diff --git a/test/legacy26/jstests/libs/parallelTester.js b/test/legacy26/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..d5cb5346abe
--- /dev/null
+++ b/test/legacy26/jstests/libs/parallelTester.js
@@ -0,0 +1,259 @@
+/**
+ * The ParallelTester class is used to test more than one test concurrently
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js",// log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || //
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
diff --git a/test/legacy26/jstests/libs/password_protected.pem b/test/legacy26/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..87976e7a574
--- /dev/null
+++ b/test/legacy26/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIgWTIkEmBBfoCAggA
+MBQGCCqGSIb3DQMHBAjzL6xrCrEygwSCBMihG8kg3nTnTtWAbB+d1D+HJxriqm37
+7rwjkfa+T5w5ZBRGpsTt3QB5ep0maX72H55ns6ukkeMoDBSadhDWrGWcLQ2IOGt3
+E14KU6vMFe3gQkfF1fupp7F+3ma58/VNUKa4X5pzZ7OCf8inlLWejp8BRqbrPWqw
+Errgw1kNN3gWfQMr7JtIt1yI1xIMEB2Z976Jn0gaGnJAtzIW4thqjkDdb8b33S9f
+cb7N1Fq4cly22f9HdqNcLgVTi1zIlPXc/f/6mtsGTsJv/rMPthJ7c3Smvh3Fce2G
+w8e+ypfey+9QG3fk7RslaFRe8ShgqfdR8CAalp2UzwNbX91Agyuim3TA6s4jM8N9
+cF6CXlqEaA4sKhiOJmw69DfTC7QRee/gi2A8bz17pX85nKrGiLYn+Od8CEhTFxVk
+lNgBLv4+RcYHVqxWlbJMdDliMN53E+hYbh0y+GDLjteEXbrxRo1aSgd/9PGiSl97
+KY4F7b/OwRzRZh1F+cXY+uP5ZQMbx5EMMkhzuj3Hiy/AVlQrW2B1lXtcf11YFFJj
+xWq6YcpmEjL+xRq1PgoU7ahl6K0A3ScedQA5b1rLdPE8+bkRAfoN+0r8HVkIL7M+
+PorrwuWnvUmovZ0yDvm153HVvRnKZKHcelklphuUWfXvcRNITG/Rx6ssj+MVjqjb
+Xy7t7wgIrk10TFWNEcunGjSSjPDkjYPazJ2dasI0rODzhlQzrnlWM+El9P5zSu2z
+1Bvet44nmAKi2WLMda5YKbJcLSNbpBFB+rTwDt/D+dfwsJeC0sjpzzatKGXNJLJQ
+7x9BZfAbBn0QrIZYGMkaxWvcpJcaVUbCKiST4DK5ze584ptrlH+Bqw4u4xLcVrdk
+hu/8IBNybLrl4zahIz7bRRNmw5wo9zUVXPXEtuYak+MK+gmD3TzJ12OUKAlAj3Go
+Fj3NFQoxBJJjuXM3zZRvHp+/AAOUANBYIyV2WssF6C+SH4o+jKyxWC/GawPFvx/B
+gy55kdEt+ORdcOfV8L5Q2xI8Qpck6E3odmaHCvjz1bUVUWqhJcTuoewHRBfWiWgc
+UCXBS/YgendUQroBOPyYIwTtk4XY9fhhKGI4LhWcx4LfzntBnM9FGmDOwhu3HqEd
+HOs8p+HhB8LPjGRot63m7gkJ1T6AswSi9hTeZeSgXuSgL23zqwPGbGTwO3AmFs/M
+8luXQ4My9bk74K3d9lFdJPaxeTpeeWNodnBItbioT5aImptU+pkKWLTVmXi4V+JE
+1ootg+DSbz+bKp4A/LLOBO4Rsx5FCGAbBMnKc/n8lF86LjKq2PLRfgdPCaVfBrcd
+TnOkBZYU0HwJAc++4AZQJvA/KRB4UPUzMe2atjVxcrr6r6vL8G04+7TBFoynpzJ+
+4KZPCJz0Avb4wYKu/IHkdKL7UY8WEGz1mMDbAu4/xCriLg49D2f1eY3FTEjBotBI
+J9hE4ccmwqlxtl4qCVRezh0C+viJ6q2tCji2SPQviaVMNWiis9cZ52J+F9TC2p9R
+PdatJg0rjuVzfoPFE8Rq8V6+zf818b19vQ4F31J+VXTz7sF8it9IO0w/3MbtfBNE
+pKmMZ9h5RdSw1kXRWXbROR9XItS7gE1wkXAxw11z7jqNSNvhotkJXH/A5qGpTFBl
+Z8A=
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDczCCAtygAwIBAgIBCzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MTgxMFoXDTQxMDQyMjE1MTgxMFowazELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRAwDgYDVQQDDAdsYXphcnVzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA0+uq+UcogTSS+BLNTwwsBU7/HnNNhNgLKnk8pdUC
+UFOzAjXnXlXEravmbhWeIj5TsCElc5FPE66OvmiixFU6l27Z5P8gopjokxll7e1B
+ujeJOXgy5h+K76xdeQ90JmQX4OO0K5rLXvNH3ufuhGr2NObrBz6kbF5Wdr3urPl6
+pFSLH02zPLqPHhhUvO8jcbUD3RrS/5ZGHqE++F+QRMuYeCXTjECA8iLDvQsiqvT6
+qK1y04V/8K0BYJd/yE31H3cvRLUu7mRAkN87lY1Aj0i3dKM/l2RAa3tsy2/kSDH3
+VeUaqjoPN8PTfJaoMZz7xV7C+Zha+JZh3E7pq6viMR6bkwIDAQABo3sweTAJBgNV
+HRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZp
+Y2F0ZTAdBgNVHQ4EFgQUbw3OWXLJpkDMpGnLWM4vxSbwUSAwHwYDVR0jBBgwFoAU
+B0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAL+OC9x0P7Ql+
+8NbONrIeOIoJD++K5rUM0vI+u9RDAxTm9TO6cP7Cl6H4zzvlzJ3w9DL66c2r+ZTy
+BxzFO1wtDKUo5RJKneC0tMz0rJQIWTqo45fDLs8UIDB5t4xp6zed34nvct+wIRaV
+hCjHBaVmILlBWb6OF9/kl1JhLtElyDs=
+-----END CERTIFICATE-----
diff --git a/test/legacy26/jstests/libs/server.pem b/test/legacy26/jstests/libs/server.pem
new file mode 100644
index 00000000000..e5980d4856e
--- /dev/null
+++ b/test/legacy26/jstests/libs/server.pem
@@ -0,0 +1,34 @@
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAK53miP9GczBWXnq
+NxHwQkgVqsDuesjwJbWilMK4gf3fjnf2PN3qDpnGbZbPD0ij8975pIKtSPoDycFm
+A8Mogip0yU2Lv2lL56CWthSBftOFDL2CWIsmuuURFXZPiVLtLytfI9oLASZFlywW
+Cs83qEDTvdW8VoVhVsxV1JFDnpXLAgMBAAECgYBoGBgxrMt97UazhNkCrPT/CV5t
+6lv8E7yMGMrlOyzkCkR4ssQyK3o2qbutJTGbR6czvIM5LKbD9Qqlh3ZrNHokWmTR
+VQQpJxt8HwP5boQvwRHg9+KSGr4JvRko1qxFs9C7Bzjt4r9VxdjhwZPdy0McGI/z
+yPXyQHjqBayrHV1EwQJBANorfCKeIxLhH3LAeUZuRS8ACldJ2N1kL6Ov43/v+0S/
+OprQeBTODuTds3sv7FCT1aYDTOe6JLNOwN2i4YVOMBsCQQDMuCozrwqftD17D06P
+9+lRXUekY5kFBs5j28Xnl8t8jnuxsXtQUTru660LD0QrmDNSauhpEmlpJknicnGt
+hmwRAkEA12MI6bBPlir0/jgxQqxI1w7mJqj8Vg27zpEuO7dzzLoyJHddpcSNBbwu
+npaAakiZK42klj26T9+XHvjYRuAbMwJBAJ5WnwWEkGH/pUHGEAyYQdSVojDKe/MA
+Vae0tzguFswK5C8GyArSGRPsItYYA7D4MlG/sGx8Oh2C6MiFndkJzBECQDcP1y4r
+Qsek151t1zArLKH4gG5dQAeZ0Lc2VeC4nLMUqVwrHcZDdd1RzLlSaH3j1MekFVfT
+6v6rrcNLEVbeuk4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBCjANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNTEz
+MjU0MFoXDTQxMDQyMTEzMjU0MFowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBAK53miP9GczBWXnqNxHwQkgVqsDuesjwJbWilMK4gf3fjnf2
+PN3qDpnGbZbPD0ij8975pIKtSPoDycFmA8Mogip0yU2Lv2lL56CWthSBftOFDL2C
+WIsmuuURFXZPiVLtLytfI9oLASZFlywWCs83qEDTvdW8VoVhVsxV1JFDnpXLAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBQgCkKiZhUV9/Zo7RwYYwm2cNK6tzAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCbsfr+Q4pty4Fy38lSxoCgnbB4pX6+Ex3xyw5zxDYR3xUlb/uHBiNZ1dBrXBxU
+ekU8dEvf+hx4iRDSW/C5N6BGnBBhCHcrPabo2bEEWKVsbUC3xchTB5rNGkvnMt9t
+G9ol7vanuzjL3S8/2PB33OshkBH570CxqqPflQbdjwt9dg==
+-----END CERTIFICATE-----
diff --git a/test/legacy26/jstests/libs/slow_weekly_util.js b/test/legacy26/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..1e2c7391cb1
--- /dev/null
+++ b/test/legacy26/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed succesfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
+
diff --git a/test/legacy26/jstests/libs/smoke.pem b/test/legacy26/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..0f6deb368c5
--- /dev/null
+++ b/test/legacy26/jstests/libs/smoke.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDLSU04xAL7eZ/Y
+J3euMDP/Uq7+a65zEIk7wzD2K5Htosbdysn67l8OzVlF2/IcB0/2SLuHHyC7+4pv
+O2+ndtvi6hr9zF4S8Bz0In/UUb+WzhFHuZd0YLl2arhnYMoDUkyLheVqEcDbECgi
+a6i5SNpAff2eUy29FVGwsaUl7+iEHqYxS9Ibmw1CeQYLEOGyhkTI9BjfO/3HwQyW
+FmOJp/IAJUFRCXTgluaMHptaonX5GmRK64wlF8Reu+uyQRdWM0cK9b3AxbBWAAyT
+SLQto+PW1J7QQ95Kn+aJ8nH1Jj80iUAjx2yAGchl1wfSHf5yAAo4OJNXgKUrQHIs
+dofsw/KTAgMBAAECggEBAItF+SX/BJwNw7lvsMsiMz2mBEZCuA4VMjBDlnPRffT1
+JJInsSG91lppzdPS0JjrWZk+U1xLsz2XJEz4x5JQGG3qPfvL3FfVMcEBMdrg9wX2
+wFgHiwAslGPQ0e3hngWQiOi+H2MALsTm2NhcMghfJUgyCWRDUH7O8FzCGIdZSk/Z
+Bx4CvBad+k+OFvUt03gwGtoCn7XneMRVGt04EU/srg0h6C3810k7+OLC1xZc8jaE
+5UAZwKO4pqJn/w0s9T2eAC+b+1YNuUTLvMTdhfH6ZkANxgcfQHWok14iGxCyXMeQ
+dBHeyNTIYKnfpwjFz85LgEvl4gsUTaa/IM0DfGPDOkECgYEA5z8Px0Sh0DSRr6PW
+3Ki9sDtJP5f+x0ARaebOfkscOJ5YvDejIxVNVBi5PYRtfCyLT78AKpRfxtBDQtW1
+w02xqkh/RR/GZm8hLyh/KzroTA3+GQvMqnE1irkJCKEOWwUjZNAFt+kgZIQWCfbn
+V1CjeK9xnEt00Icn7sh1CKubvakCgYEA4QwKZ2zj10i90NqlAAJlj6NTK/h+bHHw
+6VkUUO93GJZ1cC++dVZRhPTqBRdACJSey4nCMFdO3PLwy2gBG9LwU4rcN0Euo2bm
+J2uBBJVoXySE1250vem9I7KAramtTzQuHtIEvYhB3DHY+oYv4Eg6NSB4zAdtDKiV
+iiP23IN0+9sCgYA0KHconQRab+EEWtIVx0GxxE2LOH9Q9dR3rIWa2tossxqUqX/0
+Y9OjSkhN5dbEEVAC1rP05q6Lq2Hga0+qE5YlMGD0eGxJons7pci5OXo33VgY0h6B
+uzM2bPHqrlkMkqYfEQSZLM4PnfNSoAwiF6Anknrvo91fQ3zwUOqE4CAqsQKBgGX2
+a5xShKRcy8ud1JY9f8BlkmBgtP7zXOCMwJyu8nnMaacLqrJFCqg/wuvNjfCVTaEQ
+aFA4rn2DAMBX/fCaUNK5Hm9WdAgKrgp8Nbda7i/1Ps7Qt8n35f8PeCe2sdQp4x+J
+riYlXxmh6BoRxA1NDDpX3QMr9id/FknBY66jTNRzAoGBALab2GqBYInkmPj1nGDA
+f9+VQWFzl98k0PbLQcvKgbWuxLDf/Pz9lBi9tPzhNuTRt9RLuCMc5ZbpPbHPNWI0
+6+zofHTHoW0+prDdtZqpEE/TKmr8emjYMf4CBIKwW3CwbBRLr9C8G01ClTaan2Ge
+LMUhIseBsaQhmkL8n1AyauGL
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDlzCCAn+gAwIBAgIJAJDxQ4ilLvoVMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV
+BAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAM
+BgNVBAoMBTEwZ2VuMR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTAeFw0x
+MjEyMDQxNTA0MDJaFw0xODA1MjcxNTA0MDJaMGIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAMBgNVBAoMBTEwZ2Vu
+MR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMtJTTjEAvt5n9gnd64wM/9Srv5rrnMQiTvDMPYrke2i
+xt3KyfruXw7NWUXb8hwHT/ZIu4cfILv7im87b6d22+LqGv3MXhLwHPQif9RRv5bO
+EUe5l3RguXZquGdgygNSTIuF5WoRwNsQKCJrqLlI2kB9/Z5TLb0VUbCxpSXv6IQe
+pjFL0hubDUJ5BgsQ4bKGRMj0GN87/cfBDJYWY4mn8gAlQVEJdOCW5owem1qidfka
+ZErrjCUXxF6767JBF1YzRwr1vcDFsFYADJNItC2j49bUntBD3kqf5onycfUmPzSJ
+QCPHbIAZyGXXB9Id/nIACjg4k1eApStAcix2h+zD8pMCAwEAAaNQME4wHQYDVR0O
+BBYEFO6qoBUb1CN4lCkGhaatcjUBKwWmMB8GA1UdIwQYMBaAFO6qoBUb1CN4lCkG
+haatcjUBKwWmMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAGcJdsiI
+JdhJDPkZksOhHZUMMRHLHfWubMGAvuml6hs+SL850DRc+vRP43eF/yz+WbEydkFz
+3qXkQQSG8A2bLOtg0c6Gyi5snUOX0CKcOl3jitgwVkHcdX/v6vbiwALk+r8kJExv
+vpiWIp3nxgLtYVJP/XPoEomEwmu5zWaw28MWXM4XrEjPYmK5ZL16VXXD+lfO0cnT
+2vjkbNK8g7fKaIYYX+cr8GLZi19kO+jUYfhtxQbn8nxUfSjHseAy9BbOLUbGTdAV
+MbGRQveOnFW0eDLjiZffwqCtn91EtYy+vBuYHT/C7Ws4hNwd9lTvmg0SHAm01vi1
+b4fBFFjNvg1wCrU=
+-----END CERTIFICATE-----
diff --git a/test/legacy26/jstests/libs/test_background_ops.js b/test/legacy26/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..91f50aaa362
--- /dev/null
+++ b/test/legacy26/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities related to background operations while other operations are working
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw "Error in parallel ops " + procName + " : "
+ + tojson( result.err )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
+
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * max )
+ }
+
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
+
diff --git a/test/legacy26/jstests/libs/testconfig b/test/legacy26/jstests/libs/testconfig
new file mode 100644
index 00000000000..0c1fc871d61
--- /dev/null
+++ b/test/legacy26/jstests/libs/testconfig
@@ -0,0 +1,4 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
diff --git a/test/legacy26/jstests/libs/testconfig.json b/test/legacy26/jstests/libs/testconfig.json
new file mode 100644
index 00000000000..5af32aad7d3
--- /dev/null
+++ b/test/legacy26/jstests/libs/testconfig.json
@@ -0,0 +1,4 @@
+{
+ "fastsync" : true,
+ "version" : false
+}
diff --git a/test/legacy26/jstests/libs/trace_missing_docs.js b/test/legacy26/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/test/legacy26/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On error inserting documents, traces back and shows where the document was dropped
+//
+
+function traceMissingDoc( coll, doc, mongos ) {
+
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+} \ No newline at end of file
diff --git a/test/legacy26/jstests/misc/biginsert.js b/test/legacy26/jstests/misc/biginsert.js
new file mode 100755
index 00000000000..ebbdc18ba3e
--- /dev/null
+++ b/test/legacy26/jstests/misc/biginsert.js
@@ -0,0 +1,18 @@
+o = "xxxxxxxxxxxxxxxxxxx";
+o = o + o;
+o + o;
+o = o + o;
+o = o + o;
+o = o + o;
+
+var B = 40000;
+var last = new Date();
+for (i = 0; i < 30000000; i++) {
+ db.foo.insert({ o: o });
+ if (i % B == 0) {
+ var n = new Date();
+ print(i);
+ print("per sec: " + B*1000 / (n - last));
+ last = n;
+ }
+}
diff --git a/test/legacy26/jstests/replsets/rslib.js b/test/legacy26/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..8b7d2ed1263
--- /dev/null
+++ b/test/legacy26/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getMaster().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getMaster().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/test/legacy26/jstests/tool/csv1.js b/test/legacy26/jstests/tool/csv1.js
new file mode 100644
index 00000000000..5eb7ab0249a
--- /dev/null
+++ b/test/legacy26/jstests/tool/csv1.js
@@ -0,0 +1,42 @@
+// csv1.js
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( tojson( base ) , tojson(x) , "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/test/legacy26/jstests/tool/csvexport1.js b/test/legacy26/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/test/legacy26/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+ // For fields which contain arrays or objects, they have been
+ // exported as JSON - parse the JSON in the output and verify
+ // that it matches the original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/test/legacy26/jstests/tool/csvexport2.js b/test/legacy26/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..3e0dd2c6829
--- /dev/null
+++ b/test/legacy26/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// therefore this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop() \ No newline at end of file
diff --git a/test/legacy26/jstests/tool/csvimport1.js b/test/legacy26/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..3bff1110cbe
--- /dev/null
+++ b/test/legacy26/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/test/legacy26/jstests/tool/data/a.tsv b/test/legacy26/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/test/legacy26/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/test/legacy26/jstests/tool/data/csvimport1.csv b/test/legacy26/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/test/legacy26/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/test/legacy26/jstests/tool/data/dumprestore6/foo.bson b/test/legacy26/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 00000000000..b8f8f99e6bf
--- /dev/null
+++ b/test/legacy26/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson b/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 00000000000..dde25da302a
--- /dev/null
+++ b/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/test/legacy26/jstests/tool/dumpauth.js b/test/legacy26/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..2fcd32a9157
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumpauth.js
@@ -0,0 +1,38 @@
+// dumpauth.js
+// test mongodump with authentication
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
+
+// SERVER-5233: mongodump with authentication breaks when using "--out -"
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol",
+ "--out", "-" );
+assert.eq(x, 0, "mongodump should succeed with authentication while using '--out'");
diff --git a/test/legacy26/jstests/tool/dumpfilename1.js b/test/legacy26/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..fbe24551929
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumpfilename1.js
@@ -0,0 +1,14 @@
+//dumpfilename1.js
+
+//Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+c.getCollection("df/").insert({ a: 3 })
+assert(c.getCollection("df/").count() > 0) // check write worked
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/test/legacy26/jstests/tool/dumprestore1.js b/test/legacy26/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..fd1e8789ea6
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore1.js
@@ -0,0 +1,23 @@
+// dumprestore1.js
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore10.js b/test/legacy26/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..f59b131bb05
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore10.js
@@ -0,0 +1,63 @@
+// simple test to ensure write concern functions as expected
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/test/legacy26/jstests/tool/dumprestore3.js b/test/legacy26/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..96758219a2a
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore3.js
@@ -0,0 +1,60 @@
+// dumprestore3.js
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, _isWindows() ? -1 : 255, "mongorestore should exit w/ -1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, _isWindows() ? -1 : 255, "mongoimport should exit w/ -1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/test/legacy26/jstests/tool/dumprestore4.js b/test/legacy26/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..568e196061f
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore4.js
@@ -0,0 +1,42 @@
+// dumprestore4.js -- see SERVER-2186
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore6.js b/test/legacy26/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..d8b349e9589
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore7.js b/test/legacy26/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..2c9e6560f94
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore7.js
@@ -0,0 +1,66 @@
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getMaster().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = 9;
+x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/test/legacy26/jstests/tool/dumprestore8.js b/test/legacy26/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..4e6591738d6
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore8.js
@@ -0,0 +1,105 @@
+// dumprestore8.js
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" );
+
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" );
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore9.js b/test/legacy26/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..4bbb2fc18b1
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, {chunksize:1} );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+} \ No newline at end of file
diff --git a/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js b/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..d6b87ffe70c
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,107 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options, "restore options not ignored");
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert( undefined === db.capped.exists().options );
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore_auth.js b/test/legacy26/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..f99b5d0405c
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,35 @@
+// dumprestore_auth.js
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern", "0"); // Should fail
+assert.eq(0 , c.count() , "after restore without auth");
+
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+assert.eq(3, adminDB.system.users.count());
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore_auth2.js b/test/legacy26/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..fd7d9a034d3
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,96 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/dumprestore_auth3.js b/test/legacy26/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..b87418ed176
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,199 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod);
diff --git a/test/legacy26/jstests/tool/dumpsecondary.js b/test/legacy26/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..00f166dcf4c
--- /dev/null
+++ b/test/legacy26/jstests/tool/dumpsecondary.js
@@ -0,0 +1,38 @@
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/test/legacy26/jstests/tool/exportimport1.js b/test/legacy26/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..a7a7bcee90c
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport1.js
@@ -0,0 +1,66 @@
+// exportimport1.js
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/exportimport3.js b/test/legacy26/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..f18ba6cbd4b
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport3.js
@@ -0,0 +1,27 @@
+// exportimport3.js
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/exportimport4.js b/test/legacy26/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport4.js
@@ -0,0 +1,57 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/exportimport5.js b/test/legacy26/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport5.js
@@ -0,0 +1,82 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/exportimport6.js b/test/legacy26/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a01d49a9c8b
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport6.js
@@ -0,0 +1,26 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1})
+c.save({a:1, b:2})
+c.save({a:2, b:3})
+c.save({a:2, b:3})
+c.save({a:3, b:4})
+c.save({a:3, b:5})
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, printjson(c.findOne()));
+
+t.stop();
diff --git a/test/legacy26/jstests/tool/exportimport_bigarray.js b/test/legacy26/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..43a209b8453
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,62 @@
+// Test importing collections represented as a single line array above the maximum document size
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
+
+print('Size of one document: ' + docSize)
+print('Number of documents to exceed maximum BSON size: ' + numDocs)
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+for (i = 0; i < numDocs; ++i) {
+ src.insert({ x : bigString });
+}
+var lastError = exportimport_db.getLastError();
+if (lastError == null) {
+ print('Finished inserting ' + numDocs + ' documents');
+}
+else {
+ doassert('Insertion failed: ' + lastError);
+}
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/test/legacy26/jstests/tool/exportimport_date.js b/test/legacy26/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..57a860ca1a8
--- /dev/null
+++ b/test/legacy26/jstests/tool/exportimport_date.js
@@ -0,0 +1,49 @@
// exportimport_date.js
//
// Round-trips documents whose _id is a BSON Date through mongoexport and
// mongoimport, then verifies the destination collection matches the source.

var tt = new ToolTest('exportimport_date_test');

var exportimport_db = tt.startDB();

var src = exportimport_db.src;
var dst = exportimport_db.dst;

// Start from empty collections so the final comparison is exact.
src.drop();
dst.drop();

// Insert a date that we can format
var formatable = ISODate("1970-01-02T05:00:00Z");
assert.eq(formatable.valueOf(), 104400000);
src.insert({ "_id" : formatable });

// Insert a date that we cannot format as an ISODate string
// NOTE(review): presumably year 3001 exceeds the range the shell renders
// as an ISODate literal, exercising the tools' fallback date handling —
// confirm against the export format.
var nonformatable = ISODate("3001-01-01T00:00:00Z");
assert.eq(nonformatable.valueOf(), 32535216000000);
src.insert({ "_id" : nonformatable });

// Verify number of documents inserted
assert.eq(2, src.find().itcount());

data = 'data/exportimport_date_test.json';

print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
      ' with file: ' + data);
tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());

print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
      ' with file: ' + data);
tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());

print('About to verify that source and destination collections match');

// Sort both sides by _id so the two cursors can be compared pairwise.
src_cursor = src.find().sort({ _id : 1 });
dst_cursor = dst.find().sort({ _id : 1 });

var documentCount = 0;
while (src_cursor.hasNext()) {
    assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
                                 'Destination has ' + documentCount + ' documents.');
    assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
    ++documentCount;
}
assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
                              'Source has ' + documentCount + ' documents.');

print('Verified that source and destination collections match');
diff --git a/test/legacy26/jstests/tool/files1.js b/test/legacy26/jstests/tool/files1.js
new file mode 100644
index 00000000000..acfcc16dcc3
--- /dev/null
+++ b/test/legacy26/jstests/tool/files1.js
@@ -0,0 +1,27 @@
// files1.js
// Basic GridFS round-trip test for "mongofiles": put a binary into GridFS,
// verify its stored md5, fetch it back, and verify the copy's md5.

t = new ToolTest( "files1" )

db = t.startDB();

// Use the mongod binary itself as a convenient non-trivial test file.
filename = 'mongod'
if ( _isWindows() )
    filename += '.exe'

t.runTool( "files" , "-d" , t.baseName , "put" , filename );
md5 = md5sumFile(filename);

file_obj = db.fs.files.findOne()
assert( file_obj , "A 0" );
md5_stored = file_obj.md5;
// filemd5 recomputes the hash server-side from the stored chunks.
md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
assert.eq( md5 , md5_stored , "A 1" );
assert.eq( md5 , md5_computed, "A 2" );

mkdir(t.ext);

// Fetch the file back into the external directory and compare hashes again.
t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
md5 = md5sumFile(t.extFile);
assert.eq( md5 , md5_stored , "B" );

t.stop()
diff --git a/test/legacy26/jstests/tool/oplog1.js b/test/legacy26/jstests/tool/oplog1.js
new file mode 100644
index 00000000000..e9a002bfb65
--- /dev/null
+++ b/test/legacy26/jstests/tool/oplog1.js
@@ -0,0 +1,26 @@
// oplog1.js

// very basic test for mongooplog
// need a lot more, but test that it functions at all

t = new ToolTest( "oplog1" );

db = t.startDB();

output = db.output

// Replaying the hand-crafted oplog entry below should insert this document.
doc = { _id : 5 , x : 17 };

// Build an "insert" oplog entry in an ordinary collection; mongooplog is
// pointed at that collection via --oplogns instead of a real oplog.
db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );

assert.eq( 0 , output.count() , "before" )

t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );

// Replaying the single entry should have inserted exactly our document.
assert.eq( 1 , output.count() , "after" );

assert.eq( doc , output.findOne() , "after check" );

t.stop();
+
+
diff --git a/test/legacy26/jstests/tool/oplog_all_ops.js b/test/legacy26/jstests/tool/oplog_all_ops.js
new file mode 100644
index 00000000000..8f231cb233d
--- /dev/null
+++ b/test/legacy26/jstests/tool/oplog_all_ops.js
@@ -0,0 +1,61 @@
/**
 * Performs a simple test on mongooplog by doing different types of operations
 * that will show up in the oplog then replaying it on another replica set.
 * Correctness is verified using the dbhash command.
 */

var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
    { arbiter: true }, { arbiter: true }]});

repl1.startSet({ oplogSize: 10 });
repl1.initiate();
repl1.awaitSecondaryNodes();

var repl1Conn = new Mongo(repl1.getURL());
var testDB = repl1Conn.getDB('test');
var testColl = testDB.user;

// op i (insert)
testColl.insert({ x: 1 });
testColl.insert({ x: 2 });

// op c (command)
testDB.dropDatabase();

testColl.insert({ y: 1 });
testColl.insert({ y: 2 });
testColl.insert({ y: 3 });

// op u (update; upsert=true, multi=true)
testColl.update({}, { $inc: { z: 1 }}, true, true);

// op d (delete)
testColl.remove({ y: 2 });

// op n (no-op entry written directly into the oplog)
var oplogColl = repl1Conn.getCollection('local.oplog.rs');
oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});

// Second, independent replica set to replay the oplog onto.
var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' },
    { arbiter: true }, { arbiter: true }]});

repl2.startSet({ oplogSize: 10 });
repl2.initiate();
repl2.awaitSecondaryNodes();

// Replay everything from rs1's primary onto rs2's primary.
// (The previously-declared `srcConn` local was never used and has been removed.)
runMongoProgram('mongooplog', '--from', repl1.getPrimary().host,
                '--host', repl2.getPrimary().host);

// After replay, both sets must hold identical data.
var repl1Hash = testDB.runCommand({ dbhash: 1 });

var repl2Conn = new Mongo(repl2.getURL());
var testDB2 = repl2Conn.getDB(testDB.getName());
var repl2Hash = testDB2.runCommand({ dbhash: 1 });

assert(repl1Hash.md5);
assert.eq(repl1Hash.md5, repl2Hash.md5);

repl1.stopSet();
repl2.stopSet();
+
diff --git a/test/legacy26/jstests/tool/restorewithauth.js b/test/legacy26/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..ac9e7bc756b
--- /dev/null
+++ b/test/legacy26/jstests/tool/restorewithauth.js
@@ -0,0 +1,113 @@
/* SERVER-4972
 * Test for mongorestore on server with --auth allows restore without credentials of colls
 * with no index
 */
/*
 * 1) Start mongo without auth.
 * 2) Write to collection
 * 3) Take dump of the collection using mongodump.
 * 4) Drop the collection.
 * 5) Stop mongod from step 1.
 * 6) Restart mongod with auth.
 * 7) Add admin user to kick authentication
 * 8) Try restore without auth credentials. The restore should fail
 * 9) Try restore with correct auth credentials. The restore should succeed this time.
 */

var port = allocatePorts(1)[0];
baseName = "jstests_restorewithauth";
var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
                        "--nojournal", "--bind_ip", "127.0.0.1" );

// write to ns foo.bar
var foo = conn.getDB( "foo" );
for( var i = 0; i < 4; i++ ) {
    foo["bar"].save( { "x": i } );
    foo["baz"].save({"x": i});
}

// make sure the collection exists
assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 )

// make sure it has no index except _id (one _id index each for bar and baz)
assert.eq(foo.system.indexes.count(), 2);

foo.bar.createIndex({x:1});
assert.eq(foo.system.indexes.count(), 3);

// get data dump
var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
resetDbpath( dumpdir );
x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);

// now drop the db
foo.dropDatabase();

// stop mongod
stopMongod( port );

// restart the same dbpath, this time requiring authentication
conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
                    "--nojournal", "--bind_ip", "127.0.0.1" );

// admin user
var admin = conn.getDB( "admin" )
admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
admin.auth( "admin" , "admin" );

var foo = conn.getDB( "foo" )

// make sure no collection with the same name exists
assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0);
assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0);

// now try to restore dump without any credentials; this should be rejected
x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );

// make sure that the collection isn't restored
assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);

// now try to restore dump with correct credentials
x = runMongoProgram( "mongorestore",
                     "-h", "127.0.0.1:" + port,
                     "-d", "foo",
                     "--authenticationDatabase=admin",
                     "-u", "admin",
                     "-p", "admin",
                     "--dir", dumpdir + "foo/",
                     "-vvvvv");

// make sure that the collection was restored
assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);

// make sure the collection has 4 documents
assert.eq(foo.bar.count(), 4);
assert.eq(foo.baz.count(), 4);

foo.dropDatabase();

// make sure that the collection is empty
assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);

// a db-level user (not admin) should also be able to restore its own db
foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});

// now try to restore dump with foo database credentials
x = runMongoProgram("mongorestore",
                    "-h", "127.0.0.1:" + port,
                    "-d", "foo",
                    "-u", "user",
                    "-p", "password",
                    "--dir", dumpdir + "foo/",
                    "-vvvvv");

// make sure that the collection was restored
assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
assert.eq(foo.bar.count(), 4);
assert.eq(foo.baz.count(), 4);
assert.eq(foo.system.indexes.count(), 3); // _id on foo, _id on bar, x on foo

stopMongod( port );
diff --git a/test/legacy26/jstests/tool/stat1.js b/test/legacy26/jstests/tool/stat1.js
new file mode 100644
index 00000000000..96211f7d3f2
--- /dev/null
+++ b/test/legacy26/jstests/tool/stat1.js
@@ -0,0 +1,22 @@
// stat1.js
// test mongostat with authentication SERVER-3875
port = allocatePorts( 1 )[ 0 ];
baseName = "tool_stat1";

m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( "admin" );

t = db[ baseName ];
t.drop();

db.dropAllUsers();

db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});

assert( db.auth( "eliot" , "eliot" ) , "auth failed" );

// correct credentials: mongostat should run one row and exit 0
x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1" );
assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");

// wrong password: exit status -1 is reported as 255 on POSIX, -1 on Windows
x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1" );
assert.eq(x, _isWindows() ? -1 : 255, "mongostat should exit with -1 with eliot:wrong");
diff --git a/test/legacy26/jstests/tool/tool1.js b/test/legacy26/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/test/legacy26/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
// mongo tool tests, very basic to start with
// Covers mongodump/mongorestore and mongoexport/mongoimport round-trips
// against a single standalone mongod.


baseName = "jstests_tool_tool1";
dbPath = MongoRunner.dataPath + baseName + "/";
externalPath = MongoRunner.dataPath + baseName + "_external/";
externalBaseName = "export.json";
externalFile = externalPath + externalBaseName;

// Returns the size in bytes of the export file, or -1 if it does not exist.
function fileSize(){
    var l = listFiles( externalPath );
    for ( var i=0; i<l.length; i++ ){
        if ( l[i].baseName == externalBaseName )
            return l[i].size;
    }
    return -1;
}


port = allocatePorts( 1 )[ 0 ];
resetDbpath( externalPath );

m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
c = m.getDB( baseName ).getCollection( baseName );
c.save( { a: 1 } );
assert( c.findOne() );

// dump, drop, restore: the single document must survive the round trip
runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
c.drop();
runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
assert( c.findOne() , "mongodump then restore has no data" );
assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );

resetDbpath( externalPath );

// export, drop, import: same round trip through JSON
assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
assert.lt( 10 , fileSize() , "file size changed" );

c.drop();
runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
assert.soon( "c.findOne()" , "mongo import json A" );
assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/test/legacy26/jstests/tool/tool_replset.js b/test/legacy26/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..b38dcb95806
--- /dev/null
+++ b/test/legacy26/jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
/*
 * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
 * 1. Start a replica set.
 * 2. Add data to a collection.
 * 3. Take a dump of the database.
 * 4. Drop the db.
 * 5. Restore the db.
 * 6. Export a collection.
 * 7. Drop the collection.
 * 8. Import the collection.
 * 9. Add data to the oplog.rs collection.
 * 10. Ensure that the document doesn't exist yet.
 * 11. Now play the mongooplog tool.
 * 12. Make sure that the oplog was played
*/

// Load utility methods for replica set tests
load("jstests/replsets/rslib.js");

print("starting the replica set");

var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getMaster();
for (var i = 0; i < 100; i++) {
    master.getDB("foo").bar.insert({ a: i });
}
replTest.awaitReplication();

// Connection string naming both members of the set.
var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
        ",127.0.0.1:" + replTest.ports[1];

// Test with mongodump/mongorestore
print("dump the db");
var data = MongoRunner.dataDir + "/tool_replset-dump1/";
runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);

print("db successfully dumped, dropping now");
master.getDB("foo").dropDatabase();
replTest.awaitReplication();

print("restore the db");
runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);

print("db successfully restored, checking count");
var x = master.getDB("foo").getCollection("bar").count();
assert.eq(x, 100, "mongorestore should have successfully restored the collection");

replTest.awaitReplication();

// Test with mongoexport/mongoimport
print("export the collection");
var extFile = MongoRunner.dataDir + "/tool_replset/export";
runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
                "-d", "foo", "-c", "bar");

print("collection successfully exported, dropping now");
master.getDB("foo").getCollection("bar").drop();
replTest.awaitReplication();

print("import the collection");
runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
                "-d", "foo", "-c", "bar");

var x = master.getDB("foo").getCollection("bar").count();
assert.eq(x, 100, "mongoimport should have successfully imported the collection");

// Test with mongooplog: write a raw insert entry into the oplog, then replay it.
var doc = { _id : 5, x : 17 };
master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
                                        "o" : doc });

assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
          "was not 100 as expected");

// BUGFIX: this progress message was previously printed AFTER the tool had
// already finished; announce the replay before running it.
print("running mongooplog to replay the oplog");
runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
                "--host", replSetConnString);

assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
          "was not 101 as expected");

print("all tests successful, stopping replica set");

replTest.stopSet();

print("replica set stopped, test complete");
diff --git a/test/legacy26/jstests/tool/tsv1.js b/test/legacy26/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..1b0ddbb7c9e
--- /dev/null
+++ b/test/legacy26/jstests/tool/tsv1.js
@@ -0,0 +1,32 @@
// tsv1.js
// Exercises mongoimport's TSV mode: once with an explicit field list (-f)
// and once using the file's header line (--headerline).

t = new ToolTest( "tsv1" )

c = t.startDB( "foo" );

// Expected first data row of jstests/tool/data/a.tsv.
base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };

t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
assert.soon( "2 == c.count()" , "restore 2" );

a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id

// With -f, the header row is imported as data, so a[1] is the header itself.
assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
assert.eq( base , a[0] , "tsv parse 0" )

c.drop()
assert.eq( 0 , c.count() , "after drop 2" )

// --headerline consumes the first row as field names, leaving one document.
t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );

x = c.findOne()
delete x._id;
assert.eq( base , x , "tsv parse 2" )



t.stop()
diff --git a/test/legacy28/buildscripts/buildlogger.py b/test/legacy28/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..d2466e495c0
--- /dev/null
+++ b/test/legacy28/buildscripts/buildlogger.py
@@ -0,0 +1,479 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from settings.py
+# which will be one, two, or three directories up
+# from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
# This version of HTTPErrorProcessor is copied from
# Python 2.7, and allows REST response codes (e.g.
# "201 Created") which are treated as errors by
# older versions.
class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
    def http_response(self, request, response):
        """Pass any 2xx response straight through; route everything else
        to the opener's error handlers (mirrors Python 2.7 behaviour)."""
        code, msg, hdrs = response.code, response.msg, response.info()

        # According to RFC 2616, "2xx" code indicates that the client's
        # request was successfully received, understood, and accepted.
        if not (200 <= code < 300):
            response = self.parent.error(
                'http', request, response, code, msg, hdrs)

        return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
def url(endpoint):
    """Build the absolute buildlogger URL for ``endpoint``, guaranteeing
    exactly one slash between the root and the (trailing-slash) endpoint."""
    suffix = endpoint if endpoint.endswith('/') else '%s/' % endpoint
    return '%s/%s' % (URL_ROOT.rstrip('/'), suffix)
+
def post(endpoint, data, headers=None):
    """POST ``data`` as JSON to the buildlogger ``endpoint``.

    Returns the decoded JSON body when the server responds with
    application/json, the raw body text for other content types, or
    None if the request failed at the transport level.
    """
    data = json.dumps(data, encoding='utf-8')

    headers = headers or {}
    headers.update({'Content-Type': 'application/json; charset=utf-8'})

    req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
    try:
        response = url_opener.open(req)
    except urllib2.URLError:
        import traceback
        traceback.print_exc(file=sys.stderr)
        sys.stderr.flush()
        # indicate that the request did not succeed
        return None

    response_headers = dict(response.info())

    # eg "Content-Type: application/json; charset=utf-8"
    content_type = response_headers.get('content-type')
    match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
    if match and match.group('mimetype') == 'application/json':
        encoding = match.group('charset') or 'utf-8'
        return json.load(response, encoding=encoding)

    return response.read()
+
def traceback_to_stderr(func):
    """
    decorator which logs any exceptions encountered to stderr
    and returns none.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except urllib2.HTTPError, err:
            # HTTP errors get their headers and body dumped, since the
            # web app's error response usually explains the failure
            sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
            if hasattr(err, 'hdrs'):
                for k, v in err.hdrs.items():
                    sys.stderr.write("%s: %s\n" % (k, v))
            sys.stderr.write('\n')
            sys.stderr.write(err.read())
            sys.stderr.write('\n----\n')
            sys.stderr.flush()
        except:
            # any other exception: print the traceback but keep the caller alive
            sys.stderr.write('Traceback from buildlogger:\n')
            traceback.print_exc(file=sys.stderr)
            sys.stderr.flush()
        return None
    return wrapper
+
+
@traceback_to_stderr
def get_or_create_build(builder, buildnum, extra={}):
    """Create (or look up) the build record on the web app and return its
    id, or None if the request failed. ``extra`` keys override the base
    payload."""
    payload = {'builder': builder, 'buildnum': buildnum}
    payload.update(extra)
    response = post('build', payload)
    return response['id'] if response is not None else None
+
@traceback_to_stderr
def create_test(build_id, test_filename, test_command, test_phase):
    """Register a new test under the given build; return the test id,
    or None if the request failed."""
    response = post('build/%s/test' % build_id, {
        'test_filename': test_filename,
        'command': test_command,
        'phase': test_phase,
    })
    if response is None:
        return None
    return response['id']
+
@traceback_to_stderr
def append_test_logs(build_id, test_id, log_lines):
    """POST a batch of log lines for one test; True on success, False
    when the request failed."""
    return post('build/%s/test/%s' % (build_id, test_id), data=log_lines) is not None
+
@traceback_to_stderr
def append_global_logs(build_id, log_lines):
    """
    "global" logs are for the mongod(s) started by smoke.py
    that last the duration of a test phase -- since there
    may be output in here that is important but spans individual
    tests, the buildlogs webapp handles these logs specially.

    Returns True on success, False when the request failed.
    """
    return post('build/%s' % build_id, data=log_lines) is not None
+
@traceback_to_stderr
def finish_test(build_id, test_id, failed=False):
    """Mark the test as finished (pass/fail) on the web app; True on
    success, False when the request failed."""
    done_headers = {
        'X-Sendlogs-Test-Done': 'true',
        'X-Sendlogs-Test-Failed': 'true' if failed else 'false',
    }
    response = post('build/%s/test/%s' % (build_id, test_id), data=[],
                    headers=done_headers)
    return response is not None
+
def run_and_echo(command):
    """
    this just calls the command, and returns its return code,
    allowing stdout and stderr to work as normal. it is used
    as a fallback when environment variables or python
    dependencies cannot be configured, or when the logging
    webapp is unavailable, etc
    """
    proc = subprocess.Popen(command)

    # forward SIGTERM to the child so it can shut down cleanly
    def handle_sigterm(signum, frame):
        try:
            proc.send_signal(signum)
        except AttributeError:
            # Popen.send_signal is missing on some Python versions;
            # fall back to os.kill
            os.kill(proc.pid, signum)
    orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)

    proc.wait()

    # restore the previous SIGTERM handler once the child has exited
    signal.signal(signal.SIGTERM, orig_handler)
    return proc.returncode
+
class LogAppender(object):
    """Accumulates (timestamp, line) pairs and flushes them through a
    callback once enough lines have queued up or enough time has passed.

    Lines from a failed flush are kept in ``retrybuf`` and re-sent with
    the next batch.
    """

    def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
        # callback(*args, batch) -> truthy on success
        self.callback = callback
        self.callback_args = args

        self.send_after_lines = send_after_lines
        self.send_after_seconds = send_after_seconds

        self.buf = []
        self.retrybuf = []
        self.last_sent = time.time()

    def __call__(self, line):
        """Queue one line; flush when a size or time threshold is hit."""
        self.buf.append((time.time(), line))

        elapsed = time.time() - self.last_sent
        if len(self.buf) >= self.send_after_lines or elapsed >= self.send_after_seconds:
            self.submit()

        # no return value is expected

    def submit(self):
        """Flush buffered lines via the callback.

        Returns True when everything was delivered (or nothing was
        pending); on failure the batch moves to ``retrybuf`` and False
        is returned.
        """
        if not (self.buf or self.retrybuf):
            return True

        call_args = list(self.callback_args)
        call_args.append(list(self.buf) + self.retrybuf)

        self.last_sent = time.time()

        if not self.callback(*call_args):
            self.retrybuf += self.buf
            self.buf = []
            return False

        self.buf = []
        self.retrybuf = []
        return True
+
+
def wrap_test(command):
    """
    call the given command, intercept its stdout and stderr,
    and send results in batches of 100 lines or 10s to the
    buildlogger webapp
    """

    # get builder name and build number from environment
    builder = os.environ.get('MONGO_BUILDER_NAME')
    buildnum = os.environ.get('MONGO_BUILD_NUMBER')

    if builder is None or buildnum is None:
        return run_and_echo(command)

    try:
        buildnum = int(buildnum)
    except ValueError:
        sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
        sys.stderr.flush()
        return run_and_echo(command)

    # test takes some extra info
    phase = os.environ.get('MONGO_PHASE', 'unknown')
    test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')

    # remaining MONGO_* variables are forwarded to the web app verbatim
    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
    build_info.pop('MONGO_BUILDER_NAME', None)
    build_info.pop('MONGO_BUILD_NUMBER', None)
    build_info.pop('MONGO_PHASE', None)
    build_info.pop('MONGO_TEST_FILENAME', None)

    build_id = get_or_create_build(builder, buildnum, extra=build_info)
    if not build_id:
        return run_and_echo(command)

    test_id = create_test(build_id, test_filename, ' '.join(command), phase)
    if not test_id:
        return run_and_echo(command)

    # the peculiar formatting here matches what is printed by
    # smoke.py when starting tests
    output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
    sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
    sys.stdout.flush()

    callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
    returncode = loop_and_callback(command, callback)
    failed = bool(returncode != 0)

    # this will append any remaining unsubmitted logs, or
    # return True if there are none left to submit
    tries = 5
    while not callback.submit() and tries > 0:
        sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    # BUGFIX: this loop previously tested "tries > 5", which is never true
    # when tries starts at 5, so a failed finish_test() was never retried
    # (compare the retry loop above, which correctly uses "tries > 0").
    tries = 5
    while not finish_test(build_id, test_id, failed) and tries > 0:
        sys.stderr.write('failed to mark test finished, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    return returncode
+
def wrap_global(command):
    """
    call the given command, intercept its stdout and stderr,
    and send results in batches of 100 lines or 10s to the
    buildlogger webapp. see :func:`append_global_logs` for the
    difference between "global" and "test" log output.
    """

    # get builder name and build number from environment
    builder = os.environ.get('MONGO_BUILDER_NAME')
    buildnum = os.environ.get('MONGO_BUILD_NUMBER')

    if builder is None or buildnum is None:
        return run_and_echo(command)

    try:
        buildnum = int(buildnum)
    except ValueError:
        sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
        sys.stderr.write(traceback.format_exc())
        sys.stderr.flush()
        return run_and_echo(command)

    # remaining MONGO_* variables are forwarded to the web app verbatim
    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
    build_info.pop('MONGO_BUILDER_NAME', None)
    build_info.pop('MONGO_BUILD_NUMBER', None)

    build_id = get_or_create_build(builder, buildnum, extra=build_info)
    if not build_id:
        return run_and_echo(command)

    callback = LogAppender(callback=append_global_logs, args=(build_id, ))
    returncode = loop_and_callback(command, callback)

    # this will append any remaining unsubmitted logs, or
    # return True if there are none left to submit
    tries = 5
    while not callback.submit() and tries > 0:
        sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
        sys.stderr.flush()
        time.sleep(1)
        tries -= 1

    return returncode
+
def loop_and_callback(command, callback):
    """
    run the given command (a sequence of arguments, ordinarily
    from sys.argv), and call the given callback with each line
    of stdout or stderr encountered. after the command is finished,
    callback is called once more with None instead of a string.
    """
    # stderr is merged into stdout so a single pipe carries all output
    proc = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    def handle_sigterm(signum, frame):
        try:
            proc.send_signal(signum)
        except AttributeError:
            os.kill(proc.pid, signum)

    # register a handler to delegate SIGTERM
    # to the child process
    orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)

    while proc.poll() is None:
        try:
            line = proc.stdout.readline().strip('\r\n')
            line = utils.unicode_dammit(line)
            callback(line)
        except IOError:
            # if the signal handler is called while
            # we're waiting for readline() to return,
            # don't show a traceback
            break

    # There may be additional buffered output
    for line in proc.stdout.readlines():
        callback(line.strip('\r\n'))

    # restore the original signal handler, if any
    signal.signal(signal.SIGTERM, orig_handler)
    return proc.returncode
+
+
if __name__ == '__main__':
    # argv[0] is 'buildlogger.py'
    del sys.argv[0]

    if sys.argv[0] in ('-g', '--global'):
        # then this is wrapping a "global" command, and should
        # submit global logs to the build, not test logs to a
        # test within the build
        del sys.argv[0]
        wrapper = wrap_global

    else:
        wrapper = wrap_test

    # if we are missing credentials or the json module, then
    # we can't use buildlogger; so just echo output, but also
    # log why we can't work.
    if json is None:
        sys.stderr.write('buildlogger: could not import a json module\n')
        sys.stderr.flush()
        wrapper = run_and_echo

    elif username is None or password is None:
        sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
        sys.stderr.flush()
        wrapper = run_and_echo

    # otherwise wrap a test command as normal; the
    # wrapper functions return the return code of
    # the wrapped command, so that should be our
    # exit code as well.
    sys.exit(wrapper(sys.argv))
diff --git a/test/legacy28/buildscripts/cleanbb.py b/test/legacy28/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/test/legacy28/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/test/legacy28/buildscripts/smoke.py b/test/legacy28/buildscripts/smoke.py
new file mode 100755
index 00000000000..f43c7fbcf5b
--- /dev/null
+++ b/test/legacy28/buildscripts/smoke.py
@@ -0,0 +1,1343 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import Connection
+from pymongo.errors import OperationFailure
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
+
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+ threads = threading.enumerate();
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+        # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
+ if authMechanism != 'MONGODB-CR':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+ argv += ['--clusterAuthMode','x509'];
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = Connection(port=self.port, slave_okay=True).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif hasattr(self.proc, "terminate"):
+ # This method added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def wait_for_repl(self):
+ Connection(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ mongod.dbhash = Connection(port=mongod.port, slave_okay=True).test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = Connection(port=master.port, slave_okay=True).test
+ sTestDB = Connection(port=slave.port, slave_okay=True).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+ mOplog = mTestDB.connection.local["oplog.$main"];
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+ return True;
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe", "perftest", "perftest.exe"]:
+ argv = [path]
+ # default data directory for test and perftest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ argv = argv + [ '--eval', evalString]
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+ break;
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if start_mongod and not is_mongod_still_up:
+ print "mongod is not running after test"
+ result["mongod_running_at_end"] = is_mongod_still_up;
+ raise TestServerFailure(path)
+
+ result["mongod_running_at_end"] = is_mongod_still_up;
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True, set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+ primary = Connection(port=master.port, slave_okay=True);
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+ result = primary.admin.command("ismaster");
+ ismaster = result["ismaster"]
+ time.sleep(1)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "All docs matched!"
+ else:
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "mmap_v1": ("mmap_v1/*.js", True),
+ "gle": ("gle/*.js", True),
+ "rocksDB": ("rocksDB/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'perf',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'perf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+ #Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+ # some but not all of the required options were sete
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+ if options.ignore_files != None :
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/test/legacy28/buildscripts/utils.py b/test/legacy28/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/test/legacy28/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+ import subprocess
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+ return r;
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+ # name inpsired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
+
diff --git a/test/legacy28/jstests/libs/analyze_plan.js b/test/legacy28/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..9c2ebffd890
--- /dev/null
+++ b/test/legacy28/jstests/libs/analyze_plan.js
@@ -0,0 +1,80 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ }
+ else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ }
+ else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ }
+ else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ }
+ else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+ skips += getChunkSkips(root.inputStages[0]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
diff --git a/test/legacy28/jstests/libs/authTestsKey b/test/legacy28/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/test/legacy28/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/test/legacy28/jstests/libs/badSAN.pem b/test/legacy28/jstests/libs/badSAN.pem
new file mode 100644
index 00000000000..d8e362731e0
--- /dev/null
+++ b/test/legacy28/jstests/libs/badSAN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgIDAYKXMA0GCSqGSIb3DQEBBQUAMHQxFzAVBgNVBAMTDktl
+cm5lbCBUZXN0IENBMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzAeFw0xNDA5MjMxNTE3MjNaFw0zNDA5MjMxNTE3MjNaMG8xEjAQBgNV
+BAMTCTEyNy4wLjAuMTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCDB/lxuzeU
+OHR5nnOTJM0fHz0WeicnuUfGG5wP89Mbkd3Y+BNS0ozbnkW+NAGhD+ehNBjogISZ
+jLCd+uaYu7TLWpkgki+1+gM99Ro0vv7dIc8vD7ToILKMbM8xQmLbSxDT2tCUoXlc
+m7ccgDZl9oW1scQYQ8gWHjmk3yK8sCoGa/uwr49u74aVM7673tLsK41m8oYPzt/q
+VGT+mXpBJQcGXkTNQtIPxBtD25jr+aPietS3u70zrVPY6ZDsGE7DofEeRl97kVoF
+NcpaQmVEwEo8KCWaT6OaPaUUUjAMwzqiZaHNZ6mL1pCr65bLXP6T9tiMtWLw5+SG
+3E09fhQuWod5AgMBAAGjFTATMBEGA1UdEQQKMAiCBmJhZFNBTjANBgkqhkiG9w0B
+AQUFAAOCAQEAQzlibJvlUpJG3vc5JppdrudpXoVAP3wtpzvnkrY0GTWIUE52mCIf
+MJ5sARvjzs/uMhV5GLnjqTcT+DFkihqKyFo1tKBD7LSuSjfDvjmggG9lq0/xDvVU
+uczAuNtI1T7N+6P7LyTG4HqniYouPMDWyCKBOmzzNsk+r1OJb6cxU7QQwmSWw1n1
+ztNcF6JzCQVcd9Isau9AEXZ9q0M0sjD9mL67Qo3Dh3Mvf4UkJKqm3KOQOupUHZLU
+vJwfsS2u+gfHY1Plywzq3AuT7ygbksR3Pqfs8LFPnuRAH+41sFTGUM52hiU7mNPj
+ebl8s1tjK7WQ+a8GTABJV0hDNeWd3Sr+Og==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAgwf5cbs3lDh0eZ5zkyTNHx89FnonJ7lHxhucD/PTG5Hd2PgT
+UtKM255FvjQBoQ/noTQY6ICEmYywnfrmmLu0y1qZIJIvtfoDPfUaNL7+3SHPLw+0
+6CCyjGzPMUJi20sQ09rQlKF5XJu3HIA2ZfaFtbHEGEPIFh45pN8ivLAqBmv7sK+P
+bu+GlTO+u97S7CuNZvKGD87f6lRk/pl6QSUHBl5EzULSD8QbQ9uY6/mj4nrUt7u9
+M61T2OmQ7BhOw6HxHkZfe5FaBTXKWkJlRMBKPCglmk+jmj2lFFIwDMM6omWhzWep
+i9aQq+uWy1z+k/bYjLVi8OfkhtxNPX4ULlqHeQIDAQABAoIBAC4Bx8jyJmKpq+Pk
+CcqZelg6HLXesA7XlGbv3M0RHIeqoM2E1SwYd5LJMM3G7ueBcR/97dz8+xH6/yyJ
+Ixxvk9xu9CMmkRABN9AyVkA867nzHA73Idr7WBXMQreWCqXa5o6sXt5BEB6/If0k
+23TTqUERqLuoWQHDHRRRsJ218RuNmbvBe8TGXcfunC0eeDVKDeqAXol6bD5lztdu
+B6jkdLt5UZSQ7X8OmClbeDlac90B8usNi+pUE9q1p7X462vAw8LohkxLY2nyIcmU
+feNdTNHP+lklv+E+p9w/Az7Hf6zxm525tw90QVI048fr9SL3ftLHOt4FhucSCn0Z
+CjylP4ECgYEA+nQrNVdVwmxcWCVn69LR1grNXUSz+fLHCo+QKma4IyC1kuuZ+BBo
+Iwdf9t/S1tgtTYru3uxzCpQg7J1iDeEFEsMHl0rc6U1MmIE+6OvACVG3yotqoOqE
+852pi1OWIe94yTk2ZmNXJ8gpUE/gtMprbcSWOb7IzzrXy2lDcaEMuGkCgYEAhe7L
+ZvYI4LEvu6GSPp97qBzDH9m5UrHaTZIJk/Nu7ie919Sdg62LTfphsaK+pSyA55XQ
+8L9P7wNUPC44NnE+7CIJZsIuKdYqR5QI6No9RdTyij0Hgljfc7KuH2b8lf8EjvuH
+qZAf5zL3pIOQs8E8/MYHlGIqmTkYK41eCAcS9JECgYEADnra6KmU9rmnGR2IhZTZ
+tuNG/kZzlVbY9R5ZumnX6YgBl23xp+ri6muJu88y9GLpM5t9tfu7pvfrc2KiAaVp
+0qzd6nxUi1SBwituxK6kmqVT1+z5jDYi26bY34pEms+qjw+0unSx3EXxRYhouGsf
+jOgZu1rxZzHCuirq0E38W0kCgYBzOK16RX37t9OFywlioJekWCIxu4BouSNCirl8
+s/eiIUR8cqiUCPAIRLhZNtZmiTPYiBW5mAyvZiDIqUao56InSVznL3TBf0LeU2ea
+023VLs79yGU2aTjLc1PDJjl03XDRhWj/okMgBsPvn1QUoNDT8ZXBvPZC3VCC31qe
+818GUQKBgQDBUP2BC/Th/0dErOQ5lWkY3YbmzrTp2pDsHGZJRD+OdQ5B8FUvCP8m
+JESk/0ATn7niUqawnOy/2KlKIkeBBV2XL1rjIGEhCkBUuhCiInNDqz1AGdXzIKaT
+myoZ4PhIsH1D643e6iLhyAZuUAA4yB31E2a3l7EMyhV3vKbdWWygGQ==
+-----END RSA PRIVATE KEY-----
diff --git a/test/legacy28/jstests/libs/ca.pem b/test/legacy28/jstests/libs/ca.pem
new file mode 100644
index 00000000000..d1a5689cf0f
--- /dev/null
+++ b/test/legacy28/jstests/libs/ca.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/client.pem b/test/legacy28/jstests/libs/client.pem
new file mode 100644
index 00000000000..50a64e41728
--- /dev/null
+++ b/test/legacy28/jstests/libs/client.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIBAzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBwMQ8wDQYDVQQD
+EwZjbGllbnQxEzARBgNVBAsTCktlcm5lbFVzZXIxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJIFboAk9Fdi
+DY5Xld2iw36vB3IpHEfgWIimd+l1HX4jyp35i6xoqkZZHJUL/NMbUFJ6+44EfFJ5
+biB1y1Twr6GqpYp/3R30jKQU4PowO7DSal38MR34yiRFYPG4ZPPXXfwPSuwKrSNo
+bjqa0/DRJRVQlnGwzJkPsWxIgCjc8KNO/dSHv/CGymc9TjiFAI0VVOhMok1CBNvc
+ifwWjGBg5V1s3ItMw9x5qk+b9ff5hiOAGxPiCrr8R0C7RoeXg7ZG8K/TqXbsOZEG
+AOQPRGcrmqG3t4RNBJpZugarPWW6lr11zMpiPLFTrbq3ZNYB9akdsps4R43TKI4J
+AOtGMJmK430CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAA+nPgVT4addi13yB6mjW
++UhdUkFwtb1Wcg0sLtnNucopHZLlCj5FfDdp1RQxe3CyMonxyHTKkrWtQmVtUyvf
+C/fjpIKt9A9kAmveMHBiu9FTNTc0sbiXcrEBeHF5cD7N+Uwfoc/4rJm0WjEGNkAd
+pYLCCLVZXPVr3bnc3ZLY1dFZPsJrdH3nJGMjLgUmoNsKnaGozcjiKiXqm6doFzkg
+0Le5yD4C/QTaie2ycFa1X5bJfrgoMP7NqKko05h4l0B0+DnjpoTJN+zRreNTMKvE
+ETGvpUu0IYGxe8ZVAFnlEO/lUeMrPFvH+nDmJYsxO1Sjpds2hi1M1JoeyrTQPwXj
+2Q==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAkgVugCT0V2INjleV3aLDfq8HcikcR+BYiKZ36XUdfiPKnfmL
+rGiqRlkclQv80xtQUnr7jgR8UnluIHXLVPCvoaqlin/dHfSMpBTg+jA7sNJqXfwx
+HfjKJEVg8bhk89dd/A9K7AqtI2huOprT8NElFVCWcbDMmQ+xbEiAKNzwo0791Ie/
+8IbKZz1OOIUAjRVU6EyiTUIE29yJ/BaMYGDlXWzci0zD3HmqT5v19/mGI4AbE+IK
+uvxHQLtGh5eDtkbwr9Opduw5kQYA5A9EZyuaobe3hE0Emlm6Bqs9ZbqWvXXMymI8
+sVOturdk1gH1qR2ymzhHjdMojgkA60YwmYrjfQIDAQABAoIBAB249VEoNIRE9TVw
+JpVCuEBlKELYk2UeCWdnWykuKZ6vcmLNlNy3QVGoeeTs172w5ZykY+f4icXP6da5
+o3XauCVUMvYKKNwcFzSe+1xxzPSlH/mZh/Xt2left6f8PLBVuk/AXSPG2I9Ihodv
+VIzERaQdD0J9FmhhhV/hMhUfQ+w5rTCaDpq1KVGU61ks+JAtlQ46g+cvPF9c80cI
+TEC875n2LqWKmLRN43JUnctV3uGTmolIqCRMHPAs/egl+lG2RXJjqXSQ2uFLOvC/
+PXtBb597yadSs2BWPnTu/r7LbLGBAExzlQK1uFsTvuKsBPb3qrvUux0L68qwPuiv
+W24N8BECgYEAydtAvVB7OymQEX3mck2j7ixDN01wc1ZaCLBDvYPYS/Pvzq4MBiAD
+lHRtbIa6HPGA5jskbccPqQn8WGnJWCaYvCQryvgaA+BBgo1UTLfQJUo/7N5517vv
+KvbUa6NF0nj3VwfDV1vvy+amoWi9NOVn6qOh0K84PF4gwagb1EVy9MsCgYEAuTAt
+KCWdZ/aNcKgJc4NCUqBpLPF7EQypX14teixrbF/IRNS1YC9S20hpkG25HMBXjpBe
+tVg/MJe8R8CKzYjCt3z5Ff1bUQ2bzivbAtgjcaO0Groo8WWjnamQlrIQcvWM7vBf
+dnIflQ0slxbHfCi3XEe8tj2T69R7wJZ8L7PxR9cCgYEACgwNtt6Qo6s37obzt3DB
+3hL57YC/Ph5oMNKFLKOpWm5z2zeyhYOGahc5cxNppBMpNUxwTb6AuwsyMjxhty+E
+nqi2PU4IDXVWDWd3cLIdfB2r/OA99Ez4ZI0QmaLw0L8QoJZUVL7QurdqR9JsyHs6
+puUqIrb195s/yiPR7sjeJe0CgYEAuJviKEd3JxCN52RcJ58OGrh2oKsJ9/EbV0rX
+Ixfs7th9GMDDHuOOQbNqKOR4yMSlhCU/hKA4PgTFWPIEbOiM08XtuZIb2i0qyNjH
+N4qnqr166bny3tJnzOAgl1ljNHa8y+UsBTO3cCr17Jh0vL0KLSAGa9XvBAWKaG6b
+1iIXwXkCgYAVz+DA1yy0qfXdS1pgPiCJGlGZXpbBcFnqvbpGSclKWyUG4obYCbrb
+p5VKVfoK7uU0ly60w9+PNIRsX/VN/6SVcoOzKx40qQBMuYfJ72DQrsPjPYvNg/Nb
+4SK94Qhp9TlAyXbqKJ02DjtuDim44sGZ8g7b+k3FfoK4OtzNsqdVdQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/client_revoked.pem b/test/legacy28/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..03db67deb50
--- /dev/null
+++ b/test/legacy28/jstests/libs/client_revoked.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBAjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB4MRcwFQYDVQQD
+Ew5jbGllbnRfcmV2b2tlZDETMBEGA1UECxMKS2VybmVsVXNlcjEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+lJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSKyZMGCcqlYVQmqT/J
+Fnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505HaWv7b+M3qksRHDLpw
+/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/6vcUkg/aU/50MRUN
+qGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4lQjrCpR36fkr5a+vI
+UbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNFvGDOCNBKZK5ZxLZ3
+gGFcR6kL6u11y4zoLrZ6xwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQB8WQMn/cjh
+9qFtr7JL4VPIz/+96QaBmkHxMqiYL/iMg5Vko3GllLc1mgfWQfaWOvyRJClKj395
+595L2u8wBKon3DXUPAkinc6+VOwDWsxFLNtWl+jhigat5UDzGm8ZKFhl0WwNhqzZ
+dlNPrh2LJZzPFfimfGyVkhPHYYdELvn+bnEMT8ae1jw2yQEeVFzHe7ZdlV5nMOE7
+Gx6ZZhYlS+jgpIxez5aiKqit/0azq5GGkpCv2H8/EXxkR4gLZGYnIqGuZP3r34NY
+Lkh5J3Qnpyhdopa/34yOCa8mY1wW7vEro0fb/Dh21bpyEOz6tBk3C1QRaGD+XQOM
+cedxtUjYmWqn
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSK
+yZMGCcqlYVQmqT/JFnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505Ha
+Wv7b+M3qksRHDLpw/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/
+6vcUkg/aU/50MRUNqGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4l
+QjrCpR36fkr5a+vIUbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNF
+vGDOCNBKZK5ZxLZ3gGFcR6kL6u11y4zoLrZ6xwIDAQABAoIBAFlu0T3q18Iu0VlR
+n5GEYMgvSuM4NAVVKo8wwwYMwu1xuvXb+NMLyuyFqzaCQKpHmywOOnfhCC/KkxX8
+Ho87kTbTDKhuXZyOHx0cA1zKCDSlGdK8yt9M1vJMa0pdGi2M34b+uOQ35IVsOocH
+4KWayIH7g52V2xZ2bpOSSnpm0uCPZSBTgClCgTUYepOT2wbLn/8V0NtVpZhDsBqg
+fORuEHkiurrbLa8yjQsvbR+hsR/XbGhre8sTQapj4EITXvkEuOL/vwbRebhOFHgh
+8sipsXZ9CMaJkBpVoLZTxTKQID/9006cczJK2MGKFhn6mvP6AeFuJAM3xqLGZTc4
+xxpfJyECgYEA0+iKxy5r1WUpBHR8jTh7WjLc6r5MFJQlGgLPjdQW6gCIe/PZc+b9
+x5vDp27EQ1cAEePEu0glQ/yk19yfxbxrqHsRjRrgwoiYTXjGI5zZSjXKArHyEgBj
+XOyo5leO5XMFnk2AShPlh+/RhAW3NhxcWkBEAsCD6QyC3BPvP6aaAXkCgYEAs4WH
+dTuweTdnyquHQm59ijatvBeP8h4tBozSupflQjB9WxJeW5uEa8lNQ3lSz1F4TV3M
+xvGdDSqwftLRS2mWGho/1jaCeAzjsiUQ2WUHChxprt0+QU7XkJbaBY9eF+6THZFw
+sDG688TiolxqoD8OYi8EtxmIvbQhXHmXnrk3jj8CgYBSi74rkrisuqg8tQejl0Ht
+w+xsgM5wIblGJZwmOlzmsGh6KGYnkO6Ap/uSKELJnIVJcrk63wKtNigccjPGufwR
++EbA+ZxeCwmQ/B/q1XmLP+K+JAUQ4BfUpdexSqA+XwzsOnJj6NY7mr65t+RDbs7G
+1Uvo6oc37Ai5pAZJfCN3uQKBgQAJr5qvaJkM8UBYXwjdPLjpTCnzjBHoLlifkdmM
+18U23QbmcwdESg/LAQF6MoGVTf//rJ/v2/ltTHBZZ2aDex7uKZxoImjHsWpXokhW
+cmz+zqmlFarWOzrGQl1hD2s0P1sQrVg3KXe8z1KrD/Fw0/Yitga7GlWWZrGmG6li
+lvu4YQKBgQANODQYEaz739IoPNnMfTpTqAoQIOR4PNdMfCXSQrCB8i0Hh4z48E4F
+DEAd1xIYyxI8pu7r52dQlBk7yrILOTG0gmgLJd5xKdtCTrasYAICI3hsRLtP8dVA
+8WeykXY4Wf1bYQ+VzKVImkwL/SBm2ik5woyxCzT8JSjyoAwRrQp9Vw==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/cluster_cert.pem b/test/legacy28/jstests/libs/cluster_cert.pem
new file mode 100644
index 00000000000..a8623ab67ef
--- /dev/null
+++ b/test/legacy28/jstests/libs/cluster_cert.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkagAwIBAgIBBDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBxMRQwEgYDVQQD
+EwtjbHVzdGVydGVzdDEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCX42ZTwADG
+sEkS7ijfADlDQaJpbdgrnQKa5ssMQK3oRGSqXfTp0ThsJiVBbYZ8ZZRpPMgJdowa
+pFCGHQJh6VOdKelR0f/uNVpBGVz1yD4E4AtkA6UYcIJq6ywcj+W7Pli1Ed8VUN3Q
+tBU+HvHiEdMj74kLJb4ID1cP3gehvRv/0szkN8/ODFKCgYb1619BdFb9gRn8eily
+Wcg1m1gXz2xSfqRZkFEcEYet3BeOEGZBhaufJFzinvQjocH+kWFKlZf0+2DEFFbH
+NRqmabMmqMBUke629EUn8a7PBWBYNLld9afoNHwNY68wpONf5IqR2mNar5bVz8/d
+4g7BuVNvEFdJAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAA3U2O+cE/ZS8SDBw/sr
+BVFf0uaoME7+XX2jdTi4RUpWPfQ6uTkhKnXKzTzGrQtKwA96slGp4c3mxGBaAbC5
+IuTS97mLCju9NFvJVtazIajO4eNlG6dJSk0pQzjc0RAeLYksX/9NRNKZ+lQ5QVS2
+NVLce70QZBIvujjVJZ5hqDdjPV0JGOOUzNGyyUhzgY7s9MQagNnBSu5HO4CK1onc
+goOkizulq/5WF+JtqW8VKKx+/CH6SnTkS4b3qbjgKRmHZcOshH/d4KqhoLya7sfH
+pedmm7WgO9p8umXXqNj+04ehuPKTnD8tLMhj+GbJ9eIChPCBf1XnIzOXYep+fq9j
+n/g=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAl+NmU8AAxrBJEu4o3wA5Q0GiaW3YK50CmubLDECt6ERkql30
+6dE4bCYlQW2GfGWUaTzICXaMGqRQhh0CYelTnSnpUdH/7jVaQRlc9cg+BOALZAOl
+GHCCaussHI/luz5YtRHfFVDd0LQVPh7x4hHTI++JCyW+CA9XD94Hob0b/9LM5DfP
+zgxSgoGG9etfQXRW/YEZ/HopclnINZtYF89sUn6kWZBRHBGHrdwXjhBmQYWrnyRc
+4p70I6HB/pFhSpWX9PtgxBRWxzUapmmzJqjAVJHutvRFJ/GuzwVgWDS5XfWn6DR8
+DWOvMKTjX+SKkdpjWq+W1c/P3eIOwblTbxBXSQIDAQABAoIBAHhjNFMDZ1oUlgbc
+ICcI/VoyprFb8DA5ZbwzXBMdHPpxYzyp9hpxy3/lCDiAwEzPEsAK/h6KCOiA/pYe
+XioPD0gN1TIV+f3r6dqZaNYi3g1tK3odbXkejDCEsFT/NT6hXxw9yw0RKI9ofUHc
+synVqP3duUjNpH6s8fvQp0nqI0wzoNm1kklpTWVjZmbtSZF9m/xfv7NGwQEYUL2V
+f5YvX6aHPVDtUXAqyPBgv6SGuogSSjwRTsNTef3aY6Se5MlP3YIfRqdad8+ORkKu
+WSrO+GjQccV4sztD8Sn3LR7qe6Lmid4yopHSS4EFq0Sc8LznTeflWcRAsBLezRp5
+xZB/blECgYEA8yrEzFA247AOXbhL1CdqMyPs523oy5+dmByyovjYjEhjUCRlAa9D
+ApvID4TfAkA4n0rUdICCtwbZlFrBZbn6rXNvJ362ufZjvaFIucQm90YkG1J6Ldek
+8ohJfLyyLLWzVHJIS7WxFqqsGmDhYUTErFbJZjI8tNSglrc81jUWT7UCgYEAn+dw
+ICyc09f6+xm3nFZIOq2Gtpw8lrOJlwZugn1AqY2D5Ko2gq1Fx2oZWpVaBivjH3gU
+ONlnPuealE0RJHvCm/+axy7Rcj65IwTrN5V+j6rg1tuEdi70PvNKmN6XQqRvEjOX
+HOh3gQYP6EFAoVINZZqUkwJzqpv4tnOSpEHXncUCgYB3+Z8Vq3IZjtDXvslzCGtm
+hhAp81mLtdocpfQhYqP9Ou39KafIV/+49sGTnpwlUShet53xSUK1KSULBGgtV8Bt
++ela1DM1t3Joqn3mYfhTwoCoFl5/5cjVfRa8+6DxXEj5nlU7PY79PwIhFbG9ux9K
+ZJuD17+J/Oqq0gerLJAwjQKBgAS4AbkRV/dwcjmiwqZcbXk90bHl3mvcFH1edTho
+ldXrFS9UTpOApYSC/wiLS8LO3L76/i3HTKKwlwE1XQIknNOZsWmbWhby/uenp4FW
+agu3UTdF9xy9uft5loP4XaJb0+NHnnf97DjkgueptUyNbVPIQgYsllk8jRRlSLiM
+MN65AoGAUPLlh8ok/iNirO5YKqc5/3FKA1o1V1KSTHYVUK+Y+vuVJxQZeO3LMybe
+7AJ1cLHEWc8V4B27e6g33rfGGAW+/+RJ7/uHxuYCuKhstbq/x+rf9i4nl93emlMV
+PC3yuZsCmpk9Uypzi2+PT10yVgXkXRYtLpuUpoABWRzVXGnEsXo=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/command_line/test_parsed_options.js b/test/legacy28/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..f194b73ce7f
--- /dev/null
+++ b/test/legacy28/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,214 @@
+// Merge the two options objects. Used as a helper when we are trying to actually compare options
+// despite the fact that our test framework adds extra stuff to it. Anything set in the second
+// options object overrides the first options object. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Create and authenticate high-privilege user in case mongod is running with authorization.
+ // Try/catch is necessary in case this is being run on an uninitiated replset, by a test
+ // such as repl_options.js for example.
+ var ex;
+ try {
+ mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
+ mongod.getDB("admin").auth("root", "pass");
+ }
+ catch (ex) {
+ }
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ mongod.getDB("admin").logout();
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/test/legacy28/jstests/libs/config_files/disable_auth.ini b/test/legacy28/jstests/libs/config_files/disable_auth.ini
new file mode 100644
index 00000000000..c1193be1b03
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_auth.ini
@@ -0,0 +1 @@
+auth=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_dur.ini b/test/legacy28/jstests/libs/config_files/disable_dur.ini
new file mode 100644
index 00000000000..8f83f3ae5a7
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_dur.ini
@@ -0,0 +1 @@
+dur=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini b/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini
new file mode 100644
index 00000000000..fc839a98a76
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini
@@ -0,0 +1 @@
+httpinterface=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_ipv6.ini b/test/legacy28/jstests/libs/config_files/disable_ipv6.ini
new file mode 100644
index 00000000000..a091421022d
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_ipv6.ini
@@ -0,0 +1 @@
+ipv6=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_journal.ini b/test/legacy28/jstests/libs/config_files/disable_journal.ini
new file mode 100644
index 00000000000..d0010a86906
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_journal.ini
@@ -0,0 +1 @@
+journal=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_jsonp.ini b/test/legacy28/jstests/libs/config_files/disable_jsonp.ini
new file mode 100644
index 00000000000..82847f50b2b
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_jsonp.ini
@@ -0,0 +1 @@
+jsonp=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_jsonp.json b/test/legacy28/jstests/libs/config_files/disable_jsonp.json
new file mode 100644
index 00000000000..4d5477a8547
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_jsonp.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "JSONPEnabled" : false
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini b/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini
new file mode 100644
index 00000000000..f21b50f9513
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini
@@ -0,0 +1 @@
+moveParanoia=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noauth.ini b/test/legacy28/jstests/libs/config_files/disable_noauth.ini
new file mode 100644
index 00000000000..a65f909baf3
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noauth.ini
@@ -0,0 +1 @@
+noauth=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini b/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini
new file mode 100644
index 00000000000..b490f9038dd
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini
@@ -0,0 +1 @@
+noAutoSplit=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_nodur.ini b/test/legacy28/jstests/libs/config_files/disable_nodur.ini
new file mode 100644
index 00000000000..b0c73a48b30
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_nodur.ini
@@ -0,0 +1 @@
+nodur=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini b/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini
new file mode 100644
index 00000000000..52c4958da6e
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini
@@ -0,0 +1 @@
+nohttpinterface=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini b/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini
new file mode 100644
index 00000000000..79e428c492f
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini
@@ -0,0 +1 @@
+noIndexBuildRetry=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_nojournal.ini b/test/legacy28/jstests/libs/config_files/disable_nojournal.ini
new file mode 100644
index 00000000000..17172363d25
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini b/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini
new file mode 100644
index 00000000000..4696304134f
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini
@@ -0,0 +1 @@
+noMoveParanoia=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini b/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini
new file mode 100644
index 00000000000..471e83c3172
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini
@@ -0,0 +1 @@
+noobjcheck=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini b/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini
new file mode 100644
index 00000000000..08c78be3507
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini
@@ -0,0 +1 @@
+noprealloc=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_noscripting.ini b/test/legacy28/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini b/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini
new file mode 100644
index 00000000000..66da9f08391
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini
@@ -0,0 +1 @@
+nounixsocket=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_objcheck.ini b/test/legacy28/jstests/libs/config_files/disable_objcheck.ini
new file mode 100644
index 00000000000..bd19d026bbf
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_objcheck.ini
@@ -0,0 +1 @@
+objcheck=false
diff --git a/test/legacy28/jstests/libs/config_files/disable_rest_interface.json b/test/legacy28/jstests/libs/config_files/disable_rest_interface.json
new file mode 100644
index 00000000000..f9ad93a4f5d
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/disable_rest_interface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "RESTInterfaceEnabled" : false
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_auth.json b/test/legacy28/jstests/libs/config_files/enable_auth.json
new file mode 100644
index 00000000000..9f9cc84d107
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_auth.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "authorization" : "enabled"
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_autosplit.json b/test/legacy28/jstests/libs/config_files/enable_autosplit.json
new file mode 100644
index 00000000000..a0d4f8af1be
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_autosplit.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "autoSplit" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_httpinterface.json b/test/legacy28/jstests/libs/config_files/enable_httpinterface.json
new file mode 100644
index 00000000000..c87dabe125d
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_httpinterface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json b/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json
new file mode 100644
index 00000000000..362db08edd3
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "indexBuildRetry" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_journal.json b/test/legacy28/jstests/libs/config_files/enable_journal.json
new file mode 100644
index 00000000000..d75b94ccbc7
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_journal.json
@@ -0,0 +1,7 @@
+{
+ "storage" : {
+ "journal" : {
+ "enabled" : false
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_objcheck.json b/test/legacy28/jstests/libs/config_files/enable_objcheck.json
new file mode 100644
index 00000000000..b52be7382ed
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_objcheck.json
@@ -0,0 +1,5 @@
+{
+ "net" : {
+ "wireObjectCheck" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_paranoia.json b/test/legacy28/jstests/libs/config_files/enable_paranoia.json
new file mode 100644
index 00000000000..218646b1662
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_paranoia.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "archiveMovedChunks" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_prealloc.json b/test/legacy28/jstests/libs/config_files/enable_prealloc.json
new file mode 100644
index 00000000000..15ecefbb546
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_prealloc.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "preallocDataFiles" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_scripting.json b/test/legacy28/jstests/libs/config_files/enable_scripting.json
new file mode 100644
index 00000000000..e8f32f2c23c
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_scripting.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "javascriptEnabled" : true
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/enable_unixsocket.json b/test/legacy28/jstests/libs/config_files/enable_unixsocket.json
new file mode 100644
index 00000000000..660d21eb17f
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/enable_unixsocket.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "unixDomainSocket" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini b/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini
new file mode 100644
index 00000000000..43495fbd0bd
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini
@@ -0,0 +1 @@
+dur=
diff --git a/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini b/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini
new file mode 100644
index 00000000000..f750ac2e185
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini
@@ -0,0 +1 @@
+journal=
diff --git a/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini b/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini
new file mode 100644
index 00000000000..f1046df16a9
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini
@@ -0,0 +1 @@
+nodur=
diff --git a/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini b/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini
new file mode 100644
index 00000000000..737e5c28029
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=
diff --git a/test/legacy28/jstests/libs/config_files/set_component_verbosity.json b/test/legacy28/jstests/libs/config_files/set_component_verbosity.json
new file mode 100644
index 00000000000..69c200834a1
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/set_component_verbosity.json
@@ -0,0 +1,16 @@
+{
+ "systemLog" : {
+ "verbosity" : 2,
+ "component" : {
+ "accessControl" : {
+ "verbosity" : 0
+ },
+ "storage" : {
+ "verbosity" : 3,
+ "journaling" : {
+ "verbosity" : 5
+ }
+ }
+ }
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/set_profiling.json b/test/legacy28/jstests/libs/config_files/set_profiling.json
new file mode 100644
index 00000000000..944f0de1575
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/set_profiling.json
@@ -0,0 +1,5 @@
+{
+ "operationProfiling" : {
+ "mode" : "all"
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/set_replsetname.json b/test/legacy28/jstests/libs/config_files/set_replsetname.json
new file mode 100644
index 00000000000..522ca2b766f
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/set_replsetname.json
@@ -0,0 +1,5 @@
+{
+ "replication" : {
+ "replSetName" : "myconfigname"
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/set_shardingrole.json b/test/legacy28/jstests/libs/config_files/set_shardingrole.json
new file mode 100644
index 00000000000..71f92f122db
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/set_shardingrole.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "clusterRole" : "configsvr"
+ }
+}
diff --git a/test/legacy28/jstests/libs/config_files/set_verbosity.json b/test/legacy28/jstests/libs/config_files/set_verbosity.json
new file mode 100644
index 00000000000..47a1cce1b03
--- /dev/null
+++ b/test/legacy28/jstests/libs/config_files/set_verbosity.json
@@ -0,0 +1,5 @@
+{
+ "systemLog" : {
+ "verbosity" : 5
+ }
+}
diff --git a/test/legacy28/jstests/libs/crl.pem b/test/legacy28/jstests/libs/crl.pem
new file mode 100644
index 00000000000..275c9e2d91c
--- /dev/null
+++ b/test/legacy28/jstests/libs/crl.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:56:28 2014 GMT
+ Next Update: Aug 18 13:56:28 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 48:1b:0b:b1:89:f5:6f:af:3c:dd:2a:a0:e5:55:04:80:16:b4:
+ 23:98:39:bb:9f:16:c9:25:73:72:c6:a6:73:21:1d:1a:b6:99:
+ fc:47:5e:bc:af:64:29:02:9c:a5:db:15:8a:65:48:3c:4f:a6:
+ cd:35:47:aa:c6:c0:39:f5:a6:88:8f:1b:6c:26:61:4e:10:d7:
+ e2:b0:20:3a:64:92:c1:d3:2a:11:3e:03:e2:50:fd:4e:3c:de:
+ e2:e5:78:dc:8e:07:a5:69:55:13:2b:8f:ae:21:00:42:85:ff:
+ b6:b1:2b:69:08:40:5a:25:8c:fe:57:7f:b1:06:b0:72:ff:61:
+ de:21:59:05:a8:1b:9e:c7:8a:08:ab:f5:bc:51:b3:36:68:0f:
+ 54:65:3c:8d:b7:80:d0:27:01:3e:43:97:89:19:89:0e:c5:01:
+ 2c:55:9f:b6:e4:c8:0b:35:f8:52:45:d3:b4:09:ce:df:73:98:
+ f5:4c:e4:5a:06:ac:63:4c:f8:4d:9c:af:88:fc:19:f7:77:ea:
+ ee:56:18:49:16:ce:62:66:d1:1b:8d:66:33:b5:dc:b1:25:b3:
+ 6c:81:e9:d0:8a:1d:83:61:49:0e:d9:94:6a:46:80:41:d6:b6:
+ 59:a9:30:55:3d:5b:d3:5b:f1:37:ec:2b:76:d0:3a:ac:b2:c8:
+ 7c:77:04:78
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNTYyOFoXDTI0MDgxODEzNTYyOFqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEASBsLsYn1b6883Sqg5VUEgBa0I5g5u58WySVzcsam
+cyEdGraZ/EdevK9kKQKcpdsVimVIPE+mzTVHqsbAOfWmiI8bbCZhThDX4rAgOmSS
+wdMqET4D4lD9Tjze4uV43I4HpWlVEyuPriEAQoX/trEraQhAWiWM/ld/sQawcv9h
+3iFZBagbnseKCKv1vFGzNmgPVGU8jbeA0CcBPkOXiRmJDsUBLFWftuTICzX4UkXT
+tAnO33OY9UzkWgasY0z4TZyviPwZ93fq7lYYSRbOYmbRG41mM7XcsSWzbIHp0Iod
+g2FJDtmUakaAQda2WakwVT1b01vxN+wrdtA6rLLIfHcEeA==
+-----END X509 CRL-----
diff --git a/test/legacy28/jstests/libs/crl_client_revoked.pem b/test/legacy28/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..0b99d56936e
--- /dev/null
+++ b/test/legacy28/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,41 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:43:27 2014 GMT
+ Next Update: Aug 18 13:43:27 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+Revoked Certificates:
+ Serial Number: 02
+ Revocation Date: Aug 21 13:43:27 2014 GMT
+ Signature Algorithm: sha256WithRSAEncryption
+ 24:86:73:8d:7f:55:15:d0:d6:8a:47:53:cf:97:f7:e5:3d:0b:
+ 4a:ea:fb:02:6a:2e:79:c6:b1:38:b2:ac:f0:c0:64:47:b0:3e:
+ ad:4e:2e:94:e6:64:ed:79:34:bd:74:c0:d4:3d:b9:a1:bb:38:
+ 89:5c:02:6a:ad:6b:dc:3b:64:34:6a:2d:4c:90:36:82:95:0c:
+ 19:88:e2:a3:bf:8e:1b:56:98:37:32:87:ed:f0:bd:dd:e2:0d:
+ f9:80:dc:f2:a5:b4:ee:d9:bb:83:fe:b8:3a:13:e0:da:fc:04:
+ 77:fb:ce:f9:c5:2a:54:a7:f0:34:09:2a:b2:3d:46:1b:48:e6:
+ e8:16:c7:a1:3c:88:8c:72:cd:cc:53:dc:f8:54:63:1f:b9:8b:
+ ea:2c:e5:26:c5:b4:a4:9f:8b:e1:6c:85:9b:c6:63:6f:2f:ae:
+ 18:c5:6a:23:f0:58:27:85:5c:0f:01:04:da:d2:8b:de:9e:ab:
+ 46:00:22:07:28:e1:ef:46:91:90:06:58:95:05:68:67:58:6e:
+ 67:a8:0b:06:1a:73:d9:04:18:c9:a3:e4:e3:d6:94:a3:e1:5c:
+ e5:08:1b:b3:9d:ab:3e:ea:20:b1:04:e5:90:e1:42:54:b2:58:
+ bb:51:1a:48:87:60:b0:95:4a:2e:ce:a0:4f:8c:17:6d:6b:4c:
+ 37:aa:4d:d7
+-----BEGIN X509 CRL-----
+MIIB5DCBzQIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNDMyN1oXDTI0MDgxODEzNDMyN1owFDASAgECFw0xNDA4MjExMzQz
+MjdaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACSGc41/VRXQ
+1opHU8+X9+U9C0rq+wJqLnnGsTiyrPDAZEewPq1OLpTmZO15NL10wNQ9uaG7OIlc
+Amqta9w7ZDRqLUyQNoKVDBmI4qO/jhtWmDcyh+3wvd3iDfmA3PKltO7Zu4P+uDoT
+4Nr8BHf7zvnFKlSn8DQJKrI9RhtI5ugWx6E8iIxyzcxT3PhUYx+5i+os5SbFtKSf
+i+FshZvGY28vrhjFaiPwWCeFXA8BBNrSi96eq0YAIgco4e9GkZAGWJUFaGdYbmeo
+CwYac9kEGMmj5OPWlKPhXOUIG7Odqz7qILEE5ZDhQlSyWLtRGkiHYLCVSi7OoE+M
+F21rTDeqTdc=
+-----END X509 CRL-----
diff --git a/test/legacy28/jstests/libs/crl_expired.pem b/test/legacy28/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..c9b3abb05a7
--- /dev/null
+++ b/test/legacy28/jstests/libs/crl_expired.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Jul 21 19:45:56 2014 GMT
+ Next Update: Jul 21 20:45:56 2014 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:e8:6d:51:fc:0e:66:08:22:b2:4d:fb:da:7a:5f:4d:d1:a0:
+ 80:f0:18:f3:c5:ca:c7:05:6c:70:59:fa:d5:96:68:fa:c7:1d:
+ 7e:fb:53:3b:4a:8f:ed:bb:51:04:e8:fb:db:d7:b8:96:d9:e2:
+ 8d:bb:54:cc:11:60:c8:20:ea:81:28:5f:e1:eb:d6:8c:94:bf:
+ 42:e0:7f:a3:13:0c:76:05:f2:f0:34:98:a3:e8:64:74:4c:cb:
+ bf:39:bb:fa:d5:2d:72:02:d1:fa:56:15:59:12:b7:ff:a3:cc:
+ c9:d6:14:ca:4a:1e:0b:b4:47:cf:58:b0:e5:24:d2:21:71:0d:
+ 2d:09:77:5c:2f:ef:40:f8:74:90:03:cc:37:2e:ea:6a:25:59:
+ c0:bf:48:90:00:55:9c:db:bf:1f:f0:7b:b6:5a:90:94:b6:8d:
+ 7c:7d:bb:2d:11:5f:0c:f5:4a:9b:c5:ed:ab:e3:fd:35:c8:76:
+ 3b:2e:41:cb:df:76:b5:f4:e9:05:72:f6:56:7a:fc:34:07:d6:
+ a2:55:eb:7c:58:33:5b:9d:3e:b2:03:89:01:c6:d1:54:75:1a:
+ 5c:73:3f:5e:2e:fd:3b:38:ed:d4:e1:fa:ec:ff:84:f0:55:ee:
+ 83:e0:f0:13:97:e7:f0:55:8c:00:a3:1a:31:e4:31:9e:68:d0:
+ 6d:3e:81:b0
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDcyMTE5NDU1NloXDTE0MDcyMTIwNDU1NlqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEAFOhtUfwOZggisk372npfTdGggPAY88XKxwVscFn6
+1ZZo+scdfvtTO0qP7btRBOj729e4ltnijbtUzBFgyCDqgShf4evWjJS/QuB/oxMM
+dgXy8DSYo+hkdEzLvzm7+tUtcgLR+lYVWRK3/6PMydYUykoeC7RHz1iw5STSIXEN
+LQl3XC/vQPh0kAPMNy7qaiVZwL9IkABVnNu/H/B7tlqQlLaNfH27LRFfDPVKm8Xt
+q+P9Nch2Oy5By992tfTpBXL2Vnr8NAfWolXrfFgzW50+sgOJAcbRVHUaXHM/Xi79
+Ozjt1OH67P+E8FXug+DwE5fn8FWMAKMaMeQxnmjQbT6BsA==
+-----END X509 CRL-----
diff --git a/test/legacy28/jstests/libs/dur_checksum_bad_first.journal b/test/legacy28/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/test/legacy28/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/test/legacy28/jstests/libs/dur_checksum_bad_last.journal b/test/legacy28/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/test/legacy28/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/test/legacy28/jstests/libs/dur_checksum_good.journal b/test/legacy28/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/test/legacy28/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/test/legacy28/jstests/libs/expired.pem b/test/legacy28/jstests/libs/expired.pem
new file mode 100644
index 00000000000..e1d2ceb8de8
--- /dev/null
+++ b/test/legacy28/jstests/libs/expired.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfzCCAmegAwIBAgIBEDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzEwMTYwMDAwWhcNMTQwNzE2MTYwMDAwWjBtMRAwDgYDVQQD
+EwdleHBpcmVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAU
+BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG
+EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPFSQZlHvJpi3dmA
+1X5U1qaUN/O/EQy5IZ5Rw+cfFHWOZ84EsLZxehWyqDZRH49Rg06xSYdO2WZOopP8
+OnUVCLGL819K83ikZ5sCbvB/gKCSCenwveEN992gJfs70HaZfiJNC7/cFigSb5Jg
+5G77E1/Uml4hIThfYG2NbCsTuP/P4JLwuzCkfgEUWRbCioMPEpIpxQw2LCx5DCy6
+Llhct0Hp14N9dZ4nA1h1621wOckgGJHw9DXdt9rGzulY1UgOOPczyqT08CdpaVxK
+VzrJCcUxfUjhO4ukHz+LBFQY+ZEm+tVboDbinbiHxY24urP46/u+BwRvBvjOovJi
+NVUh5GsCAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEw
+DQYJKoZIhvcNAQEFBQADggEBAG3rRSFCSG3hilGK9SMtTpFnrquJNlL+yG0TP8VG
+1qVt1JGaDJ8YUc5HXXtKBeLnRYii7EUx1wZIKn78RHRdITo5OJvlmcwwh0bt+/eK
+u9XFgR3z35w5UPr/YktgoX39SOzAZUoorgNw500pfxfneqCZtcRufVvjtk8TUdlN
+lcd2HfIxtUHWJeTcVM18g0JdHMYdMBXDKuXOW9VWLIBC2G6nAL/8SZJtUaDllPb4
+NisuIGjfjGgNxMpEXn+sQjFTupAoJru21OtAgERWFJhKQ0hbO0kucEPKEfxHDBVG
+dKSRIl6b0XSDLfxEXPv5ZhdrK4KEw1dYYXySvIVXtn0Ys38=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA8VJBmUe8mmLd2YDVflTWppQ3878RDLkhnlHD5x8UdY5nzgSw
+tnF6FbKoNlEfj1GDTrFJh07ZZk6ik/w6dRUIsYvzX0rzeKRnmwJu8H+AoJIJ6fC9
+4Q333aAl+zvQdpl+Ik0Lv9wWKBJvkmDkbvsTX9SaXiEhOF9gbY1sKxO4/8/gkvC7
+MKR+ARRZFsKKgw8SkinFDDYsLHkMLLouWFy3QenXg311nicDWHXrbXA5ySAYkfD0
+Nd232sbO6VjVSA449zPKpPTwJ2lpXEpXOskJxTF9SOE7i6QfP4sEVBj5kSb61Vug
+NuKduIfFjbi6s/jr+74HBG8G+M6i8mI1VSHkawIDAQABAoIBAGAO1QvVkU6HAjX8
+4X6a+KJwJ2F/8aJ14trpQyixp2wv1kQce9bzjpwqdGjCm+RplvHxAgq5KTJfJLnx
+UbefOsmpoqOQ6x9fmdoK+uwCZMoFt6qGaJ63960hfVzm71D2Qk4XCxFA4xTqWb0T
+knpWuNyRfSzw1Q9ib7jL7X2sKRyx9ZP+1a41ia/Ko6iYPUUnRb1Ewo10alYVWVIE
+upeIlWqv+1DGfda9f34pGVh3ldIDh1LHqaAZhdn6sKtcgIUGcWatZRmQiA5kSflP
+VBpOI2c2tkQv0j5cPGwD7GGaJ2aKayHG0EwnoNmxCeR0Ay3MO0vBAsxn7Wy6yqrS
+EfkYhFkCgYEA/OA2AHFIH7mE0nrMwegXrEy7BZUgLRCRFWTjxwnCKFQj2Uo2dtYD
+2QQKuQWeiP+LD2nHj4n1KXuSJiB1GtmEF3JkYV4Wd7mPWEVNDHa0G8ZndquPK40s
+YSjh9u0KesUegncBFfIiwzxsk9724iaXq3aXOexc0btQB2xltRzj6/0CgYEA9E2A
+QU6pnCOzGDyOV7+TFr0ha7TXaMOb5aIVz6tJ7r5Nb7oZP9T9UCdUnw2Tls5Ce5tI
+J23O7JqwT4CudnWnk5ZtVtGBYA23mUryrgf/Utfg08hU2uRyq9LOxVaVqfV/AipN
+62GmfuxkK4PatOcAOhKqmS/zGfZqIg7V6rtX2ocCgYEAlY1ogpR8ij6mvfBgPmGr
+9nues+uBDwXYOCXlzCYKTN2OIgkQ8vEZb3RDfy9CllVDgccWfd6iPnlVcvUJLOrt
+gwxlL2x8ryvwCc1ahv+A/1g0gmtuDdy9HW0XTnjcFMWViKUm4DrGsl5+/GkF67PV
+SVOmllwifOthpjJGaHmAlmUCgYB6EFMZzlzud+PfIzqX20952Avfzd6nKL03EjJF
+rbbmA82bGmfNPfVHXC9qvRTWD76mFeMKWFJAY9XeE1SYOZb+JfYBn/I9dP0cKZdx
+nutSkCx0hK7pI6Wr9kt7zBRBdDj+cva1ufe/iQtPtrTLGHRDj9oPaibT/Qvwcmst
+umdd9wKBgQDM7j6Rh7v8AeLy2bw73Qtk0ORaHqRBHSQw87srOLwtfQzE92zSGMj+
+FVt/BdPgzyaddegKvJ9AFCPAxbA8Glnmc89FO7pcXn9Wcy+ZoZIF6YwgUPhPCp/4
+r9bKuXuQiutFbKyes/5PTXqbJ/7xKRZIpQCvxg2syrW3hxx8LIx/kQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/fts.js b/test/legacy28/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/test/legacy28/jstests/libs/fts.js
@@ -0,0 +1,18 @@
+
+function queryIDS( coll, search, filter, extra ){
+ var cmd = { search : search }
+ if ( filter )
+ cmd.filter = filter;
+ if ( extra )
+ Object.extend( cmd, extra );
+ lastCommadResult = coll.runCommand( "text" , cmd);
+
+ return getIDS( lastCommadResult );
+}
+
+function getIDS( commandResult ){
+ if ( ! ( commandResult && commandResult.results ) )
+ return []
+
+ return commandResult.results.map( function(z){ return z.obj._id; } )
+}
diff --git a/test/legacy28/jstests/libs/geo_near_random.js b/test/legacy28/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..248f5e49a6c
--- /dev/null
+++ b/test/legacy28/jstests/libs/geo_near_random.js
@@ -0,0 +1,101 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPoints already called");
+ this.nPts = nPts;
+
+ var bulk = this.t.initializeUnorderedBulkOp();
+ for (var i=0; i<nPts; i++){
+ bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) });
+ }
+ assert.writeOK(bulk.execute());
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPoints not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
diff --git a/test/legacy28/jstests/libs/host_ipaddr.js b/test/legacy28/jstests/libs/host_ipaddr.js
new file mode 100644
index 00000000000..7db1417e977
--- /dev/null
+++ b/test/legacy28/jstests/libs/host_ipaddr.js
@@ -0,0 +1,38 @@
+// Returns non-localhost ipaddr of host running the mongo shell process
+function get_ipaddr() {
+ // set temp path, if it exists
+ var path = "";
+ try {
+ path = TestData.tmpPath;
+ if (typeof path == "undefined") {
+ path = "";
+ } else if (path.slice(-1) != "/") {
+ // Terminate path with / if defined
+ path += "/";
+ }
+ }
+ catch (err) {}
+
+ var ipFile = path+"ipaddr.log";
+ var windowsCmd = "ipconfig > "+ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipAddr = null;
+ var hostType = null;
+
+ try {
+ hostType = getBuildInfo().sysInfo.split(' ')[0];
+
+ // os-specific methods
+ if (hostType == "windows") {
+ runProgram('cmd.exe', '/c', windowsCmd);
+ ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
+ } else {
+ runProgram('bash', '-c', unixCmd);
+ ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
+ }
+ }
+ finally {
+ removeFile(ipFile);
+ }
+ return ipAddr;
+}
diff --git a/test/legacy28/jstests/libs/key1 b/test/legacy28/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/test/legacy28/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/test/legacy28/jstests/libs/key2 b/test/legacy28/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/test/legacy28/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/test/legacy28/jstests/libs/localhostnameCN.pem b/test/legacy28/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e6aca6a217d
--- /dev/null
+++ b/test/legacy28/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDgTCCAmmgAwIBAgIBBTANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBvMRIwEAYDVQQD
+EwkxMjcuMC4wLjExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEW
+MBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNV
+BAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiqQNGgQggL8S
+LlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9wd0VNuD6+Ycg1mBbopO+M/K/ZWv8c
+7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9kVGRb2bNAfV2bC5/UnO1ulQdHoIB
+p3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8rwNNFvooMRg8yq8tq0qBkVhh85kct
+HHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqvI/Y5eIeZLhdIzAv37kolr8AuyqIR
+qcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZxoZN9Jv7x5LyiA+ijtQ+5aI/kMPqG
+nox+/bNFCQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAu
+MTANBgkqhkiG9w0BAQUFAAOCAQEAVJJNuUVzMRaft17NH6AzMSTiJxMFWoafmYgx
+jZnzA42XDPoPROuN7Bst6WVYDNpPb1AhPDco9qDylSZl0d341nHAuZNc84fD0omN
+Mbqieu8WseRQ300cbnS8p11c9aYpO/fNQ5iaYhGsRT7pnLs9MIgR468KVjY2xt49
+V0rshG6RxZj83KKuJd0T4X+5UeYz4B677y+SR0aoK2I2Sh+cffrMX2LotHc2I+JI
+Y9SDLvQT7chD9GzaWz634kmy3EEY0LreMm6AxhMOsr0lbZx5O8wLTScSjKARJ6OH
+nPxM1gYT07mkNmfyEnl1ChAN0MPgcLHQqEfe7x7ZQSbAv2gWfA==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAiqQNGgQggL8SLlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9w
+d0VNuD6+Ycg1mBbopO+M/K/ZWv8c7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9
+kVGRb2bNAfV2bC5/UnO1ulQdHoIBp3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8r
+wNNFvooMRg8yq8tq0qBkVhh85kctHHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqv
+I/Y5eIeZLhdIzAv37kolr8AuyqIRqcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZx
+oZN9Jv7x5LyiA+ijtQ+5aI/kMPqGnox+/bNFCQIDAQABAoIBAQAMiUT+Az2FJsHY
+G1Trf7Ba5UiS+/FDNNn7cJX++/lZQaOj9BSRVFzwuguw/8+Izxl+QIL5HlWDGupc
+tJICWwoWIuVl2S7RI6NPlhcEJF7hgzwUElnOWBfUgPEsqitpINM2e2wFSzHO3maT
+5AoO0zgUYK+8n9d74KT9CFcLqWvyS3iksK/FXfCZt0T1EoJ4LsDjeCTfVKqrku2U
++fCnZZYNkrgUI7Hku94EJfOh462V4KQAUGsvllwb1lfmR5NR86G6VX6oyMGctL5e
+1M6XQv+JQGEmAe6uULtCUGh32fzwJ9Un3j2GXOHT0LWrVc5iLuXwwzQvCGaMYtKm
+FAIDpPxhAoGBAMtwzpRyhf2op/REzZn+0aV5FWKjeq69Yxd62RaOf2EetcPwvUOs
+yQXcP0KZv15VWU/XhZUmTkPf52f0YHV/b1Sm6wUOiMNQ4XpnRj2THf0N7RS4idMm
+VwtMf1pxqttxQVKPpOvPEiTyIh2Nx/juyfD4CWkOVNTvOCd1w+av6ukNAoGBAK51
+gIXDuwJ2e5h3IJyewN/HOZqlgPKyMjnACaeXQ5wPJSrz4+UkJkuXT2dYKhv6u7K/
+GtucTdvBIJeq61+LjjkYk7OVDzoqP/uWU7p1y7gU9LZq+7tgq7r8cgeaC3IBQe7X
+jdFPEy1+zAEBh6MfFjnLZ2Kop9qbH3cNih/g9pTtAoGBAJ8dmdUtRXNByCsa7Rv2
+243qiDlf14J4CdrBcK1dwm75j/yye7VEnO2Cd8/lZHGpm3MBBC/FiA06QElkL1V2
+2GKDMun/liP9TH1p7NwYBqp3i+ha9SE6qXXi3PCmWpXLnOWwB7OPf4d6AgjPbYpb
+aYKY3PNYDC2G9IqYZyI0kSy5AoGBAJ5Fe5PfPom9c+OeL7fnTpO16kyiWZnUkDxU
+PG4OjQfHtbCCEv6PDS8G1sKq+Yjor+A5/+O8qeX0D92I8oB720txQI5rbKUYL3PP
+raY7t9YJLPlRlY8o5KN+4vSCjF+hRG+qnr6FPqDHp8xB1wvl6AQGxIR8/csVcDZR
+0j2ZmhsBAoGAO1Cpk/hWXOLAhSj8P8Q/+3439HEctTZheVBd8q/TtdwXocaZMLi8
+MXURuVTw0GtS9TmdqOFXzloFeaMhJx6TQzZ2aPcxu95b7RjEDtVHus3ed2cSJ2El
+AuRvFT2RCVvTu1mM0Ti7id+d8QBcpbIpPjNjK2Wxir/19gtEawlqlkA=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/localhostnameSAN.pem b/test/legacy28/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..480300f29e1
--- /dev/null
+++ b/test/legacy28/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBBjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB5MRwwGgYDVQQD
+ExNzYW50ZXN0aG9zdG5hbWUuY29tMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoT
+B01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZ
+b3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJKOLTNEPv08IVmhfkv6Xq1dT6pki76ggpJ7UpwdUSsTsWDKO2o1c7wnzEjfhYQ+
+CtlEvbYyL3O7f8AaO15WJdi53SMuWS+QfCKs6b0symYbinSXlZGb4oZYFSrodSxH
++G8u+TUxyeaXgTHowMWArmTRi2LgtIwXwwHJawfhFDxji3cSmLAr5YQMAaXUynq3
+g0DEAGMaeOlyn1PkJ2ZfJsX2di+sceKb+KK1xT+2vUSsvnIumBCYqMhU6y3WjBWK
+6WrmOcsldWo4IcgyzwVRlZiuuYoe6ZsxZ4nMyTdYebALPqgkt8QVXqkgcjWK8F18
+nuqWIAn1ISTjj73H4cnzYv0CAwEAAaM8MDowOAYDVR0RBDEwL4INKi5leGFtcGxl
+LmNvbYIJMTI3LjAuMC4xgghtb3JlZnVuIYIJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+BQUAA4IBAQA5M3U4wvQYI3jz/+Eh4POrJAs9eSRGkUhz1lP7D6Fcyp+BbbXB1fa9
+5qpD4bp1ZoDP2R2zca2uwwfd3DTWPbmwFMNqs2D7d0hgX71Vg9DCAwExFjoeRo44
+cCE9kakZtE3kT/tiH6SpYpnBa3dizxTmiY48z212Pw813SSXSPMN1myx5sMJof5I
+whJNQhSQOw6WHw5swZJZT4FkzxjQMrTWdF6r0d5EU9K2WWk5DTwq4QaysplB5l0H
+8qm+fnC6xI+2qgqMO9xqc6qMtHHICXtdUOup6wj/bdeo7bAQdVDyKlFKiYivDXvO
+RJNp2cwsBgxU+qdrtOLp7/j/0R3tUqWb
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAko4tM0Q+/TwhWaF+S/perV1PqmSLvqCCkntSnB1RKxOxYMo7
+ajVzvCfMSN+FhD4K2US9tjIvc7t/wBo7XlYl2LndIy5ZL5B8IqzpvSzKZhuKdJeV
+kZvihlgVKuh1LEf4by75NTHJ5peBMejAxYCuZNGLYuC0jBfDAclrB+EUPGOLdxKY
+sCvlhAwBpdTKereDQMQAYxp46XKfU+QnZl8mxfZ2L6xx4pv4orXFP7a9RKy+ci6Y
+EJioyFTrLdaMFYrpauY5yyV1ajghyDLPBVGVmK65ih7pmzFniczJN1h5sAs+qCS3
+xBVeqSByNYrwXXye6pYgCfUhJOOPvcfhyfNi/QIDAQABAoIBADqGMkClwS2pJHxB
+hEjc+4/pklWt/ywpttq+CpgzEOXN4GiRebaJD+WUUvzK3euYRwbKb6PhWJezyWky
+UID0j/qDBm71JEJdRWUnfdPAnja2Ss0Sd3UFNimF5TYUTC5ZszjbHkOC1WiTGdGP
+a+Oy5nF2SF4883x6RLJi963W0Rjn3jIW9LoLeTgm9bjWXg3iqonCo3AjREdkR/SG
+BZaCvulGEWl/A3a7NmW5EGGNUMvzZOxrqQz4EX+VnYdb7SPrH3pmQJyJpAqUlvD5
+y7pO01fI0wg9kOWiIR0vd3Gbm9NaFmlH9Gr2oyan3CWt1h1gPzkH/V17rZzVYb5L
+RnjLdyECgYEA6X16A5Gpb5rOVR/SK/JZGd+3z52+hRR8je4WhXkZqRZmbn2deKha
+LKZi1eVl11t8zitLg/OSN1uZ/873iESKtp/R6vcGcriUCd87cDh7KTyW/7ZW5jdj
+o6Y3Liai3Xrf6dL+V2xYw964Map9oK9qatYw/L+Ke6b9wbGi+hduf1kCgYEAoK8n
+pzctajS3Ntmk147n4ZVtcv78nWItBNH2B8UaofdkBlSRyUURsEY9nA34zLNWI0f3
+k59+cR13iofkQ0rKqJw1HbTTncrSsFqptyEDt23iWSmmaU3/9Us8lcNGqRm7a35V
+Km0XBFLnE0mGFGFoTpNt8oiR4WGASJPi482xkEUCgYEAwPmQn2SDCheDEr2zAdlR
+pN3O2EwCi5DMBK3TdUsKV0KJNCajwHY72Q1HQItQ6XXWp7sGta7YmOIfXFodIUWs
+85URdMXnUWeWCrayNGSp/gHytrNoDOuYcUfN8VnDX5PPfjyBM5X7ox7vUzUakXSJ
+WnVelXZlKR9yOOTs0xAMpjkCgYAbF61N6mXD5IOHwgajObsrM/CyVP/u4WDJ0UT0
+Zm1pJbc9wgCauQSUfiNhLpHmoc5CQJ4jy96b3+YJ+4OnPPMSntPt4FFV557CkWbQ
+M8bWpLZnZjhixP4FM9xRPA2r8WTCaRifAKnC1t+TRvBOe2YE6aK+I/zEzZW9pwG4
+ezQXKQKBgQAIBSJLa6xWbfbzqyPsvmRNgiEjIamF7wcb1sRjgqWM6sCzYwYv8f5v
+9C4YhNXEn+c5V2KevgYeg6iPSQuzEAfJx64QV7JD8kEBf5GNETnuW45Yg7KwKPD6
+ZCealfpy/o9iiNqbWqDNND91pj2/g5oZnac3misJg5tGCJbJsBFXag==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/mockkrb5.conf b/test/legacy28/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/test/legacy28/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/test/legacy28/jstests/libs/mockservice.keytab b/test/legacy28/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/test/legacy28/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/test/legacy28/jstests/libs/mockuser.keytab b/test/legacy28/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/test/legacy28/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/test/legacy28/jstests/libs/not_yet_valid.pem b/test/legacy28/jstests/libs/not_yet_valid.pem
new file mode 100644
index 00000000000..7c021c0becd
--- /dev/null
+++ b/test/legacy28/jstests/libs/not_yet_valid.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIBETANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMjAwNzE3MTYwMDAwWhcNMjUwNzE3MTYwMDAwWjBzMRYwFAYDVQQD
+Ew1ub3RfeWV0X3ZhbGlkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdv
+REIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQsw
+CQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2gF+Fo
+CeBKVlPyDAaEA7cjK75CxnzQy+oqw1j/vcfe/CfKL9MvDDXauR/9v1RRlww5zlxQ
+XJJtcMJtxN1EpP21cHrHCpJ/fRsCdMfJdD9MO6gcnclEI0Odwy5YI/57rAgxEuDC
+7z4d+M6z7PLq8DIwvRuhAZVTszeyTsCCkwfTJ/pisD2Ace75pS37t/ttQp+kQ+Vl
+QrfccHYxrScQ9i0JqBfrTULDl6ST76aINOaFKWqrLLkRUvE6pEkL/iP6xXUSKOsm
+uyc0yb0PK5Y/IVdrzwWUkabWEM27RAMH+CAx2iobk6REj0fsGySBzT2CaETZPjck
+vn/LYKqr+CvYjc8CAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcu
+MC4wLjEwDQYJKoZIhvcNAQEFBQADggEBADw37jpmhj/fgCZdF1NrDKLmWxb4hovQ
+Y9PRe6GsBOc1wH8Gbe4UkYAE41WUuT3xW9YpfCHLXxC7da6dhaBISWryX7n72abM
+xbfAghV3je5JAmC0E/OzQz8tTgENxJN/c4oqCQ9nVOOLjwWiim5kF0/NY8HCc/Sg
+OG9IdseRX72CavDaPxcqR9/5KKY/pxARMeyy3/D0FIB1Fwu5h9vjHEi5fGOqcizf
+S1KHfzAmTxVtjw6HWRGKmkPX0W0/lURWVkKRxvC8KkJIeKx3fl9U1PqCw0AVi5d/
+whYn4qHNFFp4OiVzXq3b5YoBy0dlHUePCIPT2GkGlV4NQKosZMJUkKo=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzaAX4WgJ4EpWU/IMBoQDtyMrvkLGfNDL6irDWP+9x978J8ov
+0y8MNdq5H/2/VFGXDDnOXFBckm1wwm3E3USk/bVwescKkn99GwJ0x8l0P0w7qByd
+yUQjQ53DLlgj/nusCDES4MLvPh34zrPs8urwMjC9G6EBlVOzN7JOwIKTB9Mn+mKw
+PYBx7vmlLfu3+21Cn6RD5WVCt9xwdjGtJxD2LQmoF+tNQsOXpJPvpog05oUpaqss
+uRFS8TqkSQv+I/rFdRIo6ya7JzTJvQ8rlj8hV2vPBZSRptYQzbtEAwf4IDHaKhuT
+pESPR+wbJIHNPYJoRNk+NyS+f8tgqqv4K9iNzwIDAQABAoIBAFWTmjyyOuIArhrz
+snOHv7AZUBw32DmcADGtqG1Cyi4DrHe22t6ORwumwsMArP8fkbiB2lNrEovSRkp0
+uqjH5867E1vVuJ2tt1hlVkrLmbi6Nl3JwxU/aVm7r7566kgAGmGyYsPt/PmiKamF
+Ekkq49pPlHSKNol6My0r5UCTVzO6uwW7dAa4GOQRI7bM7PVlxRVVeNzPH3yOsTzk
+smrkRgf8HbjtY7m/EHG281gu14ZQRCqzLshO2BtWbkx9dMXnNU5dRRaZ8Pe8XN0Z
+umsStcX6So6VFAqlwknZTi1/sqyIuQLfE+S9DocVQkvKFUgKpFddK8Nmqc8xPCKt
+UwR9hEECgYEA9kZ5KmUbzxQrF8Kn9G18AbZ/Cf6rE9fhs/J8OGcuuJ9QTjPO7pxV
+T7lGrIOX3dVu3+iHrYXZUZv+UTOePWx+ghqJ8ML7RdVsxAWMqh+1J0eBJKIdc9mt
+0hGkLEyyBbAlfNmvw8JugTUeZH2gA+VK9HoMTAjD+LvH164rrktauKECgYEA1b6z
+lZypAbAqnuCndcetcgatdd/bYNH5WWTgdZHqInt3k94EsUEHFNMQUbO+FNkOJ4qJ
+Jp7xrqkOUX+MPrzV5XYVapamlht9gvUtyxGq7DYndlq4mIsN5kReH++lqONBnWoG
+ZlbxvadkvPo+bK003hsl+E4F8X7xUssGGLvygG8CgYEAm/yLJkUgVgsqOER86R6n
+mtYipQv/A/SK6tU9xOPl/d46mS3LderjRjnN/9rhyAo1zfCUb14GBeDONlSBd9pO
+Ts3MbQiy6sqBt67kJ6UpspVhwPhFu2k25YVy/PQfFec591hSMaXnJEOm2nOPdKg4
+z5y2STqMFfGqZHvXAvCLp8ECgYA8oVGTmNKf9fbBBny5/iAG/jnp+8vg1O7kGqdI
+8lD14wvyV8IA/a8iixRP+Kpsg31uXe+1ktR/dNjo6UNA8JPD+RDuITmzzqx1n1KU
+DbjsNBhRjD5cluUkcjQ43uOg2oXcPxz9nqAH6hm7OUjHzwH2FsFYg9lPvXB6ybg6
+/+Uz5QKBgBxvTtLsZ3Cvvb3qezn4DdpLjlsrT6HWaTGqwEx8NYVBTFX/lT8P04tv
+NqFuQsDJ4gw0AZF7HqF49qdpnHEJ8tdHgBc/xDLFUMuKjON4IZtr0/j407K6V530
+m4q3ziHOu/lORDcZTz/YUjEzT8r7Qiv7QusWncvIWEiLSCC2dvvb
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/parallelTester.js b/test/legacy28/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..8c44d2df553
--- /dev/null
+++ b/test/legacy28/jstests/libs/parallelTester.js
@@ -0,0 +1,259 @@
+/**
+ * The ParallelTester class is used to test more than one test concurrently
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js",// log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || //
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
diff --git a/test/legacy28/jstests/libs/password_protected.pem b/test/legacy28/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..25e47bc2402
--- /dev/null
+++ b/test/legacy28/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBCTANBgkqhkiG9w0BAQUFADB4MRswGQYDVQQDExJwYXNz
+d29yZF9wcm90ZWN0ZWQxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29E
+QjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJ
+BgNVBAYTAlVTMB4XDTE0MDcxNzE2MDAwMFoXDTIwMDcxNzE2MDAwMFoweDEbMBkG
+A1UEAxMScGFzc3dvcmRfcHJvdGVjdGVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNV
+BAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5l
+dyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALT4r3Hcou2auIOHeihBSjk4bKQTVqI6r/stnkul359SRfKuzVA9gMQaRRDi
+MJoxczHJzS2FX+wElzBt2EUhfu3qpUJ4gJw7H4WjLx+mNnj/+6b4HUO4eRzH5hTE
+A+qgDH40qYjFDEjiARvybWo3IlDLeI/uFwlyUj5PZBUBc1LBBzNtCBfJ2MmHLhIx
+jzTFhkJZll673LL6BPHtJclXCazqKUZDLqObW4Ei6X4hdBOdC8v8Q6GMgC4BxLe0
+wsOpKYYeM3il4BtfiqDQB5ZPG0lgo1Y7OOyFHFXBA7oNkK8lykhdyH4iLt5L9mWo
+VKyZ79VqSODFuCqWo8n8kUTgA/0CAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAntxk8a0HcuPG8Fdjckp6WL+HKQQnUKdvSk06rPF0SHpN
+Ma4eZcaumROdtAYnPKvtbpq3DRCQlj59dlWPksEcYiXqf56TgcyAz5K5g5z9RbFi
+ArvAXJNRcDz1080NWGBUTPYyiKWR3PhtlYhJZ4r7fQIWLv4mifXHViw2roXXhsAY
+ubk9HOtrqE7x6NJXgR24aybxqI6TfAKfM+LJNtMwMFrPC+GHnhqMOs/jHJS38NIB
+TrKA63TdpYUroVu23/tGLQaJz352qgF4Di91RkUfnI528goj57pX78H8KRsSNVvs
+KHVNrxtZIez+pxxjBPnyfCH81swkiAPG9fdX+Hcu5A==
+-----END CERTIFICATE-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6y3b7IxcANECAggA
+MB0GCWCGSAFlAwQBAgQQrTslOKC0GZZwq48v7niXYQSCBNBNKsTN7fyw60/EEDH0
+JUgxL83Wfb7pNP97/lV5qiclY1mwcKz44kXQaesFTzhiwzAMOpbI/ijEtsNU25wV
+wtTgjAC3Em/+5/ygrmAu7hgacIRssspovmsgw029E9iOkyBd1VIrDVMi7HLHf0iU
+2Zq18QF20az2pXNMDipmVJkpc9NvjSdqka5+375pJuisspEWCDBd11K10jzCWqB5
+q3Rm1IIeq+mql6KT1rJcUoeE0facDc9GDYBiF/MfIKQ3FrZy/psqheCfL1UDUMyc
+mnm9GJO5+bCuHkg8ni0Zo5XXsf2VEFt0yt6lSucoOP43flucQaHnFKcn+5DHjDXv
+S6Eb5wEG9qWtzwWy/9DfRbkj6FxUgT3SFgizo/uLmdqFCJCnYkHUD1OuYCDmoIXP
+VTinwgK4lO/vrGfoPQrgJmdlnwHRWYjlB8edMCbmItaj2Esh3FBS12y976+UT0Sk
+8n5HsZAEYScDyNArVhrLUZRgF+r+bgZ28TDFO0MISPCAbZjhvq6lygS3dEmdTUW3
+cFDe1deNknWxZcv4UpJW4Nq6ckxwXBfTB1VFzjp7/vXrK/Sd9t8zi6vKTO8OTqc4
+KrlLXBgz0ouP/cxhYDykUrKXE2Eb0TjeAN1txZWo3fIFzXUvDZCphQEZNUqsFUxH
+86V2lwqVzKrFq6UpTgKrfTw/2ePQn9dQgd7iFWDTWjRkbzA5aAgTSVP8xQRoIOeQ
+epXtP9202kEz3h28SZYK7QBOTTX9xNmV/dzDTsi9nXZ6KtsP/aGFE5hh95jvESx/
+wlOBAPW4HR33rSYalvQPE7RjjLZHOKuYIllUBGlTOfgdA+WUXR3KxiLNPdslPBPV
++O6aDyerhWoQwE7TFwhP/FpxL/46hOu4iq4fgqfjddBTq8z5jG3c3zzogDjoDzBF
+LEQDcbenUCGbEQ7zxXsXtr3QinJ+aAejDO38hp1h9ROb5LF53/9H2j/16nby/jPX
+7kp2weRSKGJ0B6AVuS9pTsQz4+E3icsIgBWSU6qtcUz2GO2QxnFuvT9LEVnyMNN2
+IKMIEKi2FsUMddHGXLULTANlzUMocdHrd5j81eqcFPhMOFOiHpgwiwxqZyBYOLRl
+Fe7x5dLVWoLgjJagZj8uYnJbExDsfFLjEx8p4Z+rejJIC5CqZLbz9sDgCtIL+92k
++x4mlT1Rfmz9pU+RQqik83nFFRBGWxeW9iWWEgocWtmezvnK6E241v78zkqxNkvF
+JJo7BsBw7DiEHEfLhBZYuqV2q6+kwqgYrzyGIwAJkBGrkYfalVzgR+3/uN04h005
+M3jQRpSkDVGYr3JKEAlh3Sc+JD9VPbu6/RXNwy5mY67UCgWGaFwRqJE3DC9aKfNC
+OET8m8+8oQgFzhw3pNpENsgwR+Sx3K4q0GI3YwxT02pieBFNQaw53O3B3TtoCjkk
+UsuyIWqcLonwo4I3z0kjU3gEFN+0m4E4/A1DNt0J3rsKN+toCk1FqbxQg9xTZzXu
+hYmA3HMMwugzXmCanqBhmMsniPg+dRxCIfiHZhLuEpjKxZWcMWcW4M6l/wbM+LbE
+oDcTuI9ezfPTZ3xA8hNIHBT3MhuI7EJQnvKKvJDJeyX5sAtmSsSFqhEr8QZD8RgV
+5H9eOyUdfcWxLlstcq982V0oGg==
+-----END ENCRYPTED PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/server.pem b/test/legacy28/jstests/libs/server.pem
new file mode 100644
index 00000000000..df2b49163d6
--- /dev/null
+++ b/test/legacy28/jstests/libs/server.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfjCCAmagAwIBAgIBBzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBsMQ8wDQYDVQQD
+EwZzZXJ2ZXIxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEWMBQG
+A1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNVBAYT
+AlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp76KJeDczBqjSPJj
+5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMqwbX0D7hC2r3kAgccMyFoNIudPqIXfXVd
+1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4bhke6R8JRC3O5aMKIAbaiQUAI1Nd8LxIt
+LGvH+ia/DFza1whgB8ym/uzVQB6igOifJ1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEb
+R9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSzU/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHm
+r4de8jhW8wivmjTIvte33jlLibQ5nYIHrlpDLEwlzvDGaIio+OfWcgs2WuPk98MU
+tht0IQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAN
+BgkqhkiG9w0BAQUFAAOCAQEANoYxvVFsIol09BQA0fwryAye/Z4dYItvKhmwB9VS
+t99DsmJcyx0P5meB3Ed8SnwkD0NGCm5TkUY/YLacPP9uJ4SkbPkNZ1fRISyShCCn
+SGgQUJWHbCbcIEj+vssFb91c5RFJbvnenDkQokRvD2VJWspwioeLzuwtARUoMH3Y
+qg0k0Mn7Bx1bW1Y6xQJHeVlnZtzxfeueoFO55ZRkZ0ceAD/q7q1ohTXi0vMydYgu
+1CB6VkDuibGlv56NdjbttPJm2iQoPaez8tZGpBo76N/Z1ydan0ow2pVjDXVOR84Y
+2HSZgbHOGBiycNw2W3vfw7uK0OmiPRTFpJCmewDjYwZ/6w==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAp76KJeDczBqjSPJj5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMq
+wbX0D7hC2r3kAgccMyFoNIudPqIXfXVd1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4b
+hke6R8JRC3O5aMKIAbaiQUAI1Nd8LxItLGvH+ia/DFza1whgB8ym/uzVQB6igOif
+J1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEbR9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSz
+U/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHmr4de8jhW8wivmjTIvte33jlLibQ5nYIH
+rlpDLEwlzvDGaIio+OfWcgs2WuPk98MUtht0IQIDAQABAoIBACgi1ilECXCouwMc
+RDzm7Jb7Rk+Q9MVJ79YlG08Q+oRaNjvAzE03PSN5wj1WjDTUALJXPvi7oy82V4qE
+R6Q6Kvbv46aUJpYzKFEk2dw7ACpSLa1LNfjGNtMusnecA/QF/8bxLReRu8s5mBQn
+NDnZvCqllLbfjNlAvsF+/UIn5sqFZpAZPMtPwkTAeh5ge8H9JvrG8y8aXsiFGAhV
+Z7tMZyn8wPCUrRi14NLvVB4hxM66G/tuTp8r9AmeTU+PV+qbCnKXd+v0IS52hvX9
+z75OPfAc66nm4bbPCapb6Yx7WaewPXXU0HDxeaT0BeQ/YfoNa5OT+ZOX1KndSfHa
+VhtmEsECgYEA3m86yYMsNOo+dkhqctNVRw2N+8gTO28GmWxNV9AC+fy1epW9+FNR
+yTQXpBkRrR7qrd5mF7WBc7vAIiSfVs021RMofzn5B1x7jzkH34VZtlviNdE3TZhx
+lPinqo0Yy3UEksgsCBJFIofuCmeTLk4ZtqoiZnXr35RYibaZoQdUT4kCgYEAwQ6Y
+xsKFYFks1+HYl29kR0qUkXFlVbKOhQIlj/dPm0JjZ0xYkUxmzoXD68HrOWgz7hc2
+hZaQTgWf+8cRaZNfh7oL+Iglczc2UXuwuUYguYssD/G6/ZPY15PhItgCghaU5Ewy
+hMwIJ81NENY2EQTgk/Z1KZitXdVJfHl/IPMQgdkCgYASdqkqkPjaa5dDuj8byO8L
+NtTSUYlHJbAmjBbfcyTMG230/vkF4+SmDuznci1FcYuJYyyWSzqzoKISM3gGfIJQ
+rYZvCSDiu4qGGPXOWANaX8YnMXalukGzW/CO96dXPB9lD7iX8uxKMX5Q3sgYz+LS
+hszUNHWf2XB//ehCtZkKAQKBgQCxL2luepeZHx82H9T+38BkYgHLHw0HQzLkxlyd
+LjlE4QCEjSB4cmukvkZbuYXfEVEgAvQKVW6p/SWhGkpT4Gt8EXftKV9dyF21GVXQ
+JZnhUOcm1xBsrWYGLXYi2agrpvgONBTlprERfq5tdnz2z8giZL+RZswu45Nnh8bz
+AcKzuQKBgQCGOQvKvNL5XKKmws/KRkfJbXgsyRT2ubO6pVL9jGQG5wntkeIRaEpT
+oxFtWMdPx3b3cxtgSP2ojllEiISk87SFIN1zEhHZy/JpTF0GlU1qg3VIaA78M1p2
+ZdpUsuqJzYmc3dDbQMepIaqdW4xMoTtZFyenUJyoezz6eWy/NlZ/XQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/servers.js b/test/legacy28/jstests/libs/servers.js
new file mode 100755
index 00000000000..30734822845
--- /dev/null
+++ b/test/legacy28/jstests/libs/servers.js
@@ -0,0 +1,957 @@
+// Wrap whole file in a function to avoid polluting the global namespace
+(function() {
+
// Scans a raw argv-style argument list for "--dbpath <path>" and returns the
// path; throws when none is present.
_parsePath = function() {
    var dbpath = "";
    for (var i = 0; i < arguments.length; ++i) {
        if (arguments[i] == "--dbpath") {
            dbpath = arguments[i + 1];
        }
    }

    if (dbpath == "") {
        throw Error("No dbpath specified");
    }
    return dbpath;
}
+
// Scans a raw argv-style argument list for "--port <port>" and returns the
// port value; throws when none is present.
_parsePort = function() {
    var port = "";
    for (var i = 0; i < arguments.length; ++i) {
        if (arguments[i] == "--port") {
            port = arguments[i + 1];
        }
    }

    if (port == "") {
        throw Error("No port specified");
    }
    return port;
}
+
// Decides whether two connection descriptions (strings, connection objects,
// or objects with a .host/.name) refer to the same server or replica set.
connectionURLTheSame = function( a , b ){
    // Trivially equal, or one side missing entirely.
    if ( a == b )
        return true;
    if ( ! a || ! b )
        return false;

    // Unwrap connection-like objects down to their string representation.
    if ( a.host ) return connectionURLTheSame( a.host, b );
    if ( b.host ) return connectionURLTheSame( a, b.host );
    if ( a.name ) return connectionURLTheSame( a.name, b );
    if ( b.name ) return connectionURLTheSame( a, b.name );

    if ( a.indexOf( "/" ) >= 0 || b.indexOf( "/" ) >= 0 ){
        // Replica-set URLs ("setName/host,host"): compare set names only.
        return a.split( "/" )[0] == b.split( "/" )[0];
    }

    // Plain "host" or "host:port" strings.
    var partsA = a.split( ":" );
    var partsB = b.split( ":" );
    if ( partsA.length != partsB.length ) return false;
    if ( partsA.length == 2 && partsA[1] != partsB[1] ) return false;

    // Loopback names are canonicalized to the real host name before comparing.
    var canonical = function( host ){
        return ( host == "localhost" || host == "127.0.0.1" ) ? getHostName() : host;
    };
    return canonical( partsA[0] ) == canonical( partsB[0] );
}

// Self-checks for the comparison logic above.
assert( connectionURLTheSame( "foo" , "foo" ) )
assert( ! connectionURLTheSame( "foo" , "bar" ) )

assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) )
assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) )
+
// Builds an argv array for `binaryName`.  `args` is either a one-element
// array holding an option object, or a plain list of pre-formatted arguments.
createMongoArgs = function( binaryName , args ){
    var fullArgs = [ binaryName ];

    var isOptionObject = args.length == 1 && isObject( args[0] );
    if ( ! isOptionObject ){
        for ( var i = 0; i < args.length; i++ )
            fullArgs.push( args[i] );
        return fullArgs;
    }

    var o = args[0];
    for ( var k in o ){
        if ( ! o.hasOwnProperty(k) )
            continue;
        if ( k == "v" && isNumber( o[k] ) ){
            // Numeric verbosity becomes "-v" .. "-vvvvvvvvvv" (capped at 10).
            var n = o[k];
            if ( n > 0 ){
                if ( n > 10 ) n = 10;
                var flag = "-";
                while ( n-- > 0 ) flag += "v";
                fullArgs.push( flag );
            }
        }
        else {
            // "--key value"; empty-string values produce a bare "--key" flag.
            fullArgs.push( "--" + k );
            if ( o[k] != "" )
                fullArgs.push( "" + o[k] );
        }
    }

    return fullArgs;
}
+
+
// Namespace object: all process-runner state and helpers hang off MongoRunner.
MongoRunner = function(){}

// Default locations for database files.
MongoRunner.dataDir = "/data/db"
MongoRunner.dataPath = "/data/db/"
// Map of "<port>" -> true for ports handed out by nextOpenPort().
MongoRunner.usedPortMap = {}

// Pairs a regex matching a requested version string with the binary version
// suffix that should actually be used (see getBinVersionFor).
MongoRunner.VersionSub = function(regex, version) {
    this.regex = regex;
    this.version = version;
}

// These patterns allow substituting the binary versions used for each
// version string to support the dev/stable MongoDB release cycle.
MongoRunner.binVersionSubs = [ new MongoRunner.VersionSub(/^latest$/, ""),
                               new MongoRunner.VersionSub(/^oldest-supported$/, "1.8"),
                               // To-be-updated when 2.8 becomes available
                               new MongoRunner.VersionSub(/^last-stable$/, "2.6"),
                               // Latest unstable and next stable are effectively the
                               // same release
                               new MongoRunner.VersionSub(/^2\.7(\..*){0,1}/, ""),
                               new MongoRunner.VersionSub(/^2\.8(\..*){0,1}/, "") ];
+
// Maps a requested version string (or version iterator) to the binary
// version suffix that should actually be launched.
MongoRunner.getBinVersionFor = function(version) {
    // Version iterators advance via toString(); resolve to a concrete string.
    if (version instanceof MongoRunner.versionIterator.iterator) {
        version = version.toString();
    }

    // No version set means we use no suffix, this is *different* from "latest"
    // since latest may be mapped to a different version.
    if (version == null) version = "";
    version = version.trim();
    if (version === "") return "";

    // Apply every matching substitution, in declaration order.
    MongoRunner.binVersionSubs.forEach(function(sub) {
        if (sub.regex.test(version)) {
            version = sub.version;
        }
    });

    return version;
}
+
// True when two version strings resolve to the same binary.  "" (the current
// build) only matches itself; otherwise one string must be a prefix of the
// other (e.g. "2.6" matches "2.6.5").
MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
    var a = MongoRunner.getBinVersionFor(versionA);
    var b = MongoRunner.getBinVersionFor(versionB);

    if (a === "" || b === "") {
        return a === b;
    }

    return a.startsWith(b) || b.startsWith(a);
}
+
// Option keys that control the runner itself; they are consumed by
// MongoRunner and never forwarded to the mongod/mongos command line
// (checked via `k in MongoRunner.logicalOptions` in arrOptions).
MongoRunner.logicalOptions = { runId : true,
                               pathOpts : true,
                               remember : true,
                               noRemember : true,
                               appendOptions : true,
                               restart : true,
                               noCleanData : true,
                               cleanData : true,
                               startClean : true,
                               forceLock : true,
                               useLogFiles : true,
                               logFile : true,
                               useHostName : true,
                               useHostname : true,
                               noReplSet : true,
                               forgetPort : true,
                               arbiter : true,
                               noJournalPrealloc : true,
                               noJournal : true,
                               binVersion : true,
                               waitForConnect : true }
+
// Expands $-variables in a path template and anchors relative results under
// the data directory.
//
// path:     template possibly containing $dataPath, $dataDir, or $<key>.
// pathOpts: map of key -> replacement for $<key> substitutions.
MongoRunner.toRealPath = function( path, pathOpts ){

    // Replace all $pathOptions with actual values
    pathOpts = pathOpts || {}
    path = path.replace( /\$dataPath/g, MongoRunner.dataPath )
    path = path.replace( /\$dataDir/g, MongoRunner.dataDir )
    // BUG FIX: the loop variable used to be an undeclared implicit global.
    for( var key in pathOpts ){
        path = path.replace( RegExp( "\\$" + RegExp.escape(key), "g" ), pathOpts[ key ] )
    }

    // Relative path
    // Detect Unix and Windows absolute paths
    // as well as Windows drive letters
    // Also captures Windows UNC paths

    if( ! path.match( /^(\/|\\|[A-Za-z]:)/ ) ){
        if( path != "" && ! path.endsWith( "/" ) )
            path += "/"

        path = MongoRunner.dataPath + path
    }

    return path

}
+
// Same expansion as toRealPath, but with any trailing slash removed so the
// result names a directory (or file).
MongoRunner.toRealDir = function( path, pathOpts ){
    var realPath = MongoRunner.toRealPath( path, pathOpts );

    if( realPath.endsWith( "/" ) ){
        realPath = realPath.substring( 0, realPath.length - 1 );
    }

    return realPath;
}

MongoRunner.toRealFile = MongoRunner.toRealDir
+
// Hands out the first port >= 27000 not yet allocated by this runner,
// recording it in usedPortMap so it is not reused.
MongoRunner.nextOpenPort = function(){
    var offset = 0;
    while( MongoRunner.usedPortMap[ "" + ( 27000 + offset ) ] ){
        offset++;
    }

    var port = 27000 + offset;
    MongoRunner.usedPortMap[ "" + port ] = true;
    return port;
}
+
/**
 * Returns an iterator object which yields successive versions on toString(), starting from a
 * random initial position, from an array of versions.
 *
 * If passed a single version string or an already-existing version iterator, just returns the
 * object itself, since it will yield correctly on toString()
 *
 * @param {Array.<String>}|{String}|{versionIterator}
 */
MongoRunner.versionIterator = function( arr, isRandom ){
    // Single version strings and existing iterators already yield correctly.
    if( typeof arr == "string" ) return arr;
    if( arr.isVersionIterator ) return arr;

    if (isRandom == undefined) isRandom = false;

    // Starting position: a random element, or the first one.
    var start = isRandom ? parseInt( Random.rand() * arr.length ) : 0;

    return new MongoRunner.versionIterator.iterator(start, arr);
}
+
// Iterator over a version array: each toString() call advances to the next
// version (wrapping around) and returns it.
MongoRunner.versionIterator.iterator = function(i, arr) {
    this.toString = function() {
        i = ( i + 1 ) % arr.length;
        print( "Returning next version : " + i +
               " (" + arr[i] + ") from " + tojson( arr ) + "..." );
        return arr[ i ];
    };

    // Marker so versionIterator() does not re-wrap an existing iterator.
    this.isVersionIterator = true;
}
+
/**
 * Converts the args object by pairing all keys with their value and appending
 * dash-dash (--) to the keys. The only exception to this rule are keys that
 * are defined in MongoRunner.logicalOptions, of which they will be ignored.
 *
 * @param {string} binaryName
 * @param {Object} args
 *
 * @return {Array.<String>} an array of parameter strings that can be passed
 *   to the binary.
 */
MongoRunner.arrOptions = function( binaryName , args ){

    // Slot 0 is a placeholder; the (possibly version-suffixed) binary name is
    // written there at the end.
    var fullArgs = [ "" ]

    // isObject returns true even if "args" is an array, so the else branch of this statement is
    // dead code. See SERVER-14220.
    if ( isObject( args ) || ( args.length == 1 && isObject( args[0] ) ) ){

        var o = isObject( args ) ? args : args[0]

        // If we've specified a particular binary version, use that
        if (o.binVersion && o.binVersion != "") {
            binaryName += "-" + o.binVersion;
        }

        // Manage legacy options
        var isValidOptionForBinary = function( option, value ){

            if( ! o.binVersion ) return true

            // Version 1.x options
            if( o.binVersion.startsWith( "1." ) ){

                return [ "nopreallocj" ].indexOf( option ) < 0
            }

            return true
        }

        for ( var k in o ){

            // Make sure our logical option should be added to the array of options
            if( ! o.hasOwnProperty( k ) ||
                k in MongoRunner.logicalOptions ||
                ! isValidOptionForBinary( k, o[k] ) ) continue

            // Numeric verbosity is expanded to "-v".."-vvvvvvvvvv" (max 10).
            if ( ( k == "v" || k == "verbose" ) && isNumber( o[k] ) ){
                var n = o[k]
                if ( n > 0 ){
                    if ( n > 10 ) n = 10
                    var temp = "-"
                    while ( n-- > 0 ) temp += "v"
                    fullArgs.push( temp )
                }
            }
            else {
                // Empty-string values emit a bare "--key" flag with no value.
                if( o[k] == undefined || o[k] == null ) continue
                fullArgs.push( "--" + k )
                if ( o[k] != "" )
                    fullArgs.push( "" + o[k] )
            }
        }
    }
    else {
        for ( var i=0; i<args.length; i++ )
            fullArgs.push( args[i] )
    }

    fullArgs[ 0 ] = binaryName
    return fullArgs
}
+
// Inverse of arrOptions: converts ["bin", "--a", "1", "--flag"] back into
// { a : "1", flag : "" }.  A pure-"v" flag ("-v", "-vv", ...) additionally
// sets a numeric "verbose" level.
MongoRunner.arrToOpts = function( arr ){
    var opts = {};

    var i = 1;
    while( i < arr.length ){
        if( ! arr[i].startsWith( "-" ) ){
            i++;
            continue;
        }

        // Strip up to two leading dashes.
        var opt = arr[i].replace( /^-/, "" ).replace( /^-/, "" );

        var hasValue = arr.length > i + 1 && ! arr[ i + 1 ].startsWith( "-" );
        if( hasValue ){
            opts[ opt ] = arr[ i + 1 ];
            i++;
        }
        else {
            opts[ opt ] = "";
        }

        if( opt.replace( /v/g, "" ) == "" ){
            opts[ "verbose" ] = opt.length;
        }

        i++;
    }

    return opts;
}
+
// Options remembered per runId so restarts can reuse them.
MongoRunner.savedOptions = {}

// Normalizes a raw options object shared by mongod/mongos startup: resolves
// restart/remember/runId aliases, allocates a port, applies SSL/x509 test
// defaults, and records the options under the runId when appropriate.
// NOTE(review): this mutates and re-merges `opts`; statement order matters.
MongoRunner.mongoOptions = function( opts ){

    // Don't remember waitForConnect
    var waitForConnect = opts.waitForConnect;
    delete opts.waitForConnect;

    // If we're a mongo object
    if( opts.getDB ){
        opts = { restart : opts.runId }
    }

    // Initialize and create a copy of the opts
    opts = Object.merge( opts || {}, {} )

    if( ! opts.restart ) opts.restart = false

    // RunId can come from a number of places
    // If restart is passed as an old connection
    if( opts.restart && opts.restart.getDB ){
        opts.runId = opts.restart.runId
        opts.restart = true
    }
    // If it's the runId itself
    else if( isObject( opts.restart ) ){
        opts.runId = opts.restart
        opts.restart = true
    }

    if( isObject( opts.remember ) ){
        opts.runId = opts.remember
        opts.remember = true
    }
    else if( opts.remember == undefined ){
        // Remember by default if we're restarting
        opts.remember = opts.restart
    }

    // If we passed in restart : <conn> or runId : <conn>
    if( isObject( opts.runId ) && opts.runId.runId ) opts.runId = opts.runId.runId

    // Restarting with remembered options: saved options are the base, the
    // caller's current opts win on conflicts.
    if( opts.restart && opts.remember ) opts = Object.merge( MongoRunner.savedOptions[ opts.runId ], opts )

    // Create a new runId
    opts.runId = opts.runId || ObjectId()

    // Save the port if required
    if( ! opts.forgetPort ) opts.port = opts.port || MongoRunner.nextOpenPort()

    var shouldRemember = ( ! opts.restart && ! opts.noRemember ) || ( opts.restart && opts.appendOptions )

    // Normalize and get the binary version to use
    opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);

    if ( shouldRemember ){
        MongoRunner.savedOptions[ opts.runId ] = Object.merge( opts, {} )
    }

    // Default for waitForConnect is true
    opts.waitForConnect = (waitForConnect == undefined || waitForConnect == null) ?
        true : waitForConnect;

    if( jsTestOptions().useSSL ) {
        if (!opts.sslMode) opts.sslMode = "requireSSL";
        if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
        if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";

        // Needed for jstest/ssl/upgrade_to_ssl.js
        opts.sslWeakCertificateValidation = "";

        // Needed for jstest/ssl/ssl_hostname_validation.js
        opts.sslAllowInvalidHostnames = "";
    }

    if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
        opts.clusterAuthMode = "x509";
    }

    // Allocate a port even when forgetPort suppressed the assignment above.
    opts.port = opts.port || MongoRunner.nextOpenPort()
    MongoRunner.usedPortMap[ "" + parseInt( opts.port ) ] = true

    opts.pathOpts = Object.merge( opts.pathOpts || {}, { port : "" + opts.port, runId : "" + opts.runId } )

    return opts
}
+
/**
 * @option {object} opts
 *
 * {
 *   dbpath {string}
 *   useLogFiles {boolean}: use with logFile option.
 *   logFile {string}: path to the log file. If not specified and useLogFiles
 *     is true, automatically creates a log file inside dbpath.
 *   noJournalPrealloc {boolean}
 *   noJournal {boolean}
 *   keyFile
 *   replSet
 *   oplogSize
 * }
 */
MongoRunner.mongodOptions = function( opts ){

    // Shared normalization (runId, port, SSL defaults, ...).
    opts = MongoRunner.mongoOptions( opts )

    opts.dbpath = MongoRunner.toRealDir( opts.dbpath || "$dataDir/mongod-$port",
                                         opts.pathOpts )

    // Make the dbpath available to later $dbpath path substitutions.
    opts.pathOpts = Object.merge( opts.pathOpts, { dbpath : opts.dbpath } )

    if( ! opts.logFile && opts.useLogFiles ){
        opts.logFile = opts.dbpath + "/mongod.log"
    }
    else if( opts.logFile ){
        opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
    }

    // mongod takes --logpath, so mirror logFile there.
    if ( opts.logFile !== undefined ) {
        opts.logpath = opts.logFile;
    }

    if( jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc )
        opts.nopreallocj = ""

    if( jsTestOptions().noJournal || opts.noJournal )
        opts.nojournal = ""

    if( jsTestOptions().keyFile && !opts.keyFile) {
        opts.keyFile = jsTestOptions().keyFile
    }

    if( jsTestOptions().useSSL ) {
        if (!opts.sslMode) opts.sslMode = "requireSSL";
        if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
        if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";

        // Needed for jstest/ssl/upgrade_to_ssl.js
        opts.sslWeakCertificateValidation = "";

        // Needed for jstest/ssl/ssl_hostname_validation.js
        opts.sslAllowInvalidHostnames = "";
    }

    if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
        opts.clusterAuthMode = "x509";
    }

    // Logical flags translated to real mongod options.
    if( opts.noReplSet ) opts.replSet = null
    if( opts.arbiter ) opts.oplogSize = 1

    return opts
}
+
// Normalizes options for starting a mongos: resolves the configdb string,
// wires up log file paths, and applies the test key file default.
MongoRunner.mongosOptions = function( opts ){
    opts = MongoRunner.mongoOptions( opts );

    // A live connection may be passed as configdb; reduce it to its host string.
    var configdb = opts.configdb;
    if( configdb && configdb.getDB ){
        configdb = configdb.host;
        opts.configdb = configdb;
    }

    // Expose a filename-safe form of the configdb string to path templates.
    opts.pathOpts = Object.merge( opts.pathOpts,
                                  { configdb : configdb.replace( /:|,/g, "-" ) } );

    if( opts.logFile ){
        opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts );
    }
    else if( opts.useLogFiles ){
        opts.logFile = MongoRunner.toRealFile( "$dataDir/mongos-$configdb-$port.log",
                                               opts.pathOpts );
    }

    // mongos takes --logpath, so mirror logFile there.
    if ( opts.logFile !== undefined ){
        opts.logpath = opts.logFile;
    }

    if( jsTestOptions().keyFile && !opts.keyFile ){
        opts.keyFile = jsTestOptions().keyFile;
    }

    return opts;
}
+
/**
 * Starts a mongod instance.
 *
 * @param {Object} opts
 *
 *   {
 *     useHostName {boolean}: Uses hostname of machine if true
 *     forceLock {boolean}: Deletes the lock file if set to true
 *     dbpath {string}: location of db files
 *     cleanData {boolean}: Removes all files in dbpath if true
 *     startClean {boolean}: same as cleanData
 *     noCleanData {boolean}: Do not clean files (cleanData takes priority)
 *
 *     @see MongoRunner.mongodOptions for other options
 *   }
 *
 * @return {Mongo} connection object to the started mongod instance.
 *
 * @see MongoRunner.arrOptions
 */
MongoRunner.runMongod = function( opts ){

    opts = opts || {}
    var useHostName = false;
    var runId = null;
    var waitForConnect = true;
    var fullOptions = opts;

    if( isObject( opts ) ) {

        opts = MongoRunner.mongodOptions( opts );
        fullOptions = opts;

        useHostName = opts.useHostName || opts.useHostname;
        runId = opts.runId;
        waitForConnect = opts.waitForConnect;

        if( opts.forceLock ) removeFile( opts.dbpath + "/mongod.lock" )
        if( ( opts.cleanData || opts.startClean ) || ( ! opts.restart && ! opts.noCleanData ) ){
            print( "Resetting db path '" + opts.dbpath + "'" )
            resetDbpath( opts.dbpath )
        }

        // Convert the option object into an argv array for the binary.
        opts = MongoRunner.arrOptions( "mongod", opts )
    }

    var mongod = MongoRunner.startWithArgs(opts, waitForConnect);
    // BUG FIX: this used to assign to an undeclared global `mongos`
    // (copy/paste from runMongos), so the waitForConnect:false path fell
    // through to `return null` instead of returning a stub object carrying
    // the connection metadata below.
    if (!waitForConnect) mongod = {};
    if (!mongod) return null;

    // Decorate the connection with metadata about how it was started.
    mongod.commandLine = MongoRunner.arrToOpts( opts )
    mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port
    mongod.host = mongod.name
    mongod.port = parseInt( mongod.commandLine.port )
    mongod.runId = runId || ObjectId()
    mongod.savedOptions = MongoRunner.savedOptions[ mongod.runId ];
    mongod.fullOptions = fullOptions;

    return mongod
}
+
// Starts a mongos instance; mirrors runMongod but without data-path cleanup.
// Returns a Mongo connection (or a stub object when waitForConnect is false).
MongoRunner.runMongos = function( opts ){
    opts = opts || {};

    var useHostName = false;
    var runId = null;
    var waitForConnect = true;
    var fullOptions = opts;

    if( isObject( opts ) ) {
        opts = MongoRunner.mongosOptions( opts );
        fullOptions = opts;

        useHostName = opts.useHostName || opts.useHostname;
        runId = opts.runId;
        waitForConnect = opts.waitForConnect;

        // Convert the option object into an argv array for the binary.
        opts = MongoRunner.arrOptions( "mongos", opts );
    }

    var mongos = MongoRunner.startWithArgs(opts, waitForConnect);
    if (!waitForConnect) mongos = {};
    if (!mongos) return null;

    // Decorate the connection with metadata about how it was started.
    mongos.commandLine = MongoRunner.arrToOpts( opts );
    mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
    mongos.host = mongos.name;
    mongos.port = parseInt( mongos.commandLine.port );
    mongos.runId = runId || ObjectId();
    mongos.savedOptions = MongoRunner.savedOptions[ mongos.runId ];
    mongos.fullOptions = fullOptions;

    return mongos;
}
+
/**
 * Kills a mongod process.
 *
 * @param {number} port the port of the process to kill
 * @param {number} signal The signal number to use for killing
 * @param {Object} opts Additional options. Format:
 *    {
 *      auth: {
 *        user {string}: admin user name
 *        pwd {string}: admin password
 *      }
 *    }
 *
 * Note: The auth option is required in a authenticated mongod running in Windows since
 *  it uses the shutdown command, which requires admin credentials.
 */
MongoRunner.stopMongod = function( port, signal, opts ){

    if( ! port ) {
        print( "Cannot stop mongo process " + port )
        return
    }

    signal = signal || 15

    // A connection object may be passed instead of a port number.
    if( port.port )
        port = parseInt( port.port )

    // A runId may be passed; look up the remembered port.
    if( port instanceof ObjectId ){
        // BUG FIX: savedOptions is a plain map keyed by runId, not a function;
        // this used to be invoked as `MongoRunner.savedOptions( port )`, which
        // always threw a TypeError.  Its `var opts` also shadowed the
        // shutdown-options parameter above, clobbering the auth credentials.
        var savedOpts = MongoRunner.savedOptions[ port ]
        if( savedOpts ) port = parseInt( savedOpts.port )
    }

    var exitCode = stopMongod( parseInt( port ), parseInt( signal ), opts )

    // Free the port for reuse by nextOpenPort().
    delete MongoRunner.usedPortMap[ "" + parseInt( port ) ]

    return exitCode
}

MongoRunner.stopMongos = MongoRunner.stopMongod
+
// Reports whether the process identified by a port, connection object, or
// runId appears stopped, i.e. its port is no longer marked in usedPortMap.
MongoRunner.isStopped = function( port ){

    if( ! port ) {
        print( "Cannot detect if process " + port + " is stopped." )
        return
    }

    // A connection object may be passed instead of a port number.
    if( port.port )
        port = parseInt( port.port )

    // A runId may be passed; look up the remembered port.
    if( port instanceof ObjectId ){
        // BUG FIX: savedOptions is a plain map keyed by runId, not a function;
        // this used to be invoked as `MongoRunner.savedOptions( port )`, which
        // always threw a TypeError.
        var savedOpts = MongoRunner.savedOptions[ port ]
        if( savedOpts ) port = parseInt( savedOpts.port )
    }

    return MongoRunner.usedPortMap[ "" + parseInt( port ) ] ? false : true
}
+
/**
 * Starts an instance of the specified mongo tool
 *
 * @param {String} binaryName The name of the tool to run
 * @param {Object} opts options to pass to the tool
 *    {
 *      binVersion {string}: version of tool to run
 *    }
 *
 * @see MongoRunner.arrOptions
 */
MongoRunner.runMongoTool = function( binaryName, opts ){
    opts = opts || {};

    // Normalize and get the binary version to use
    opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);

    // Expand the option object into argv form and hand off to the generic
    // program runner.
    var argsArray = MongoRunner.arrOptions(binaryName, opts);
    return runMongoProgram.apply(null, argsArray);
}
+
// Given a test name figures out a directory for that test to use for dump files and makes sure
// that directory exists and is empty.
MongoRunner.getAndPrepareDumpDirectory = function(testName) {
    var dumpDir = MongoRunner.dataPath + testName + "_external/";
    resetDbpath(dumpDir);
    return dumpDir;
}
+
// Start a mongod instance and return a 'Mongo' object connected to it.
// This function's arguments are passed as command line arguments to mongod.
// The specified 'dbpath' is cleared if it exists, created if not.
// var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
startMongodEmpty = function () {
    var args = createMongoArgs("mongod", arguments);

    // Wipe the data directory before starting.
    var dbpath = _parsePath.apply(null, args);
    resetDbpath(dbpath);

    return startMongoProgram.apply(null, args);
}

// Same as startMongodEmpty, with a loud reminder that data is destroyed.
startMongod = function () {
    print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
    return startMongodEmpty.apply(null, arguments);
}

// Variant that leaves the existing data directory untouched.
startMongodNoReset = function(){
    return startMongoProgram.apply( null, createMongoArgs( "mongod" , arguments ) );
}

// Thin wrapper over MongoRunner.runMongos.
startMongos = function(args){
    return MongoRunner.runMongos(args);
}
+
/**
 * Returns argArray with any test-specific arguments added (the passed-in
 * array is mutated and returned; non-mongod/mongos programs are unchanged).
 */
function appendSetParameterArgs(argArray) {
    var programName = argArray[0];
    // Only mongod/mongos understand these options.
    if (!(programName.endsWith('mongod') || programName.endsWith('mongos'))) {
        return argArray;
    }

    if (jsTest.options().enableTestCommands) {
        argArray.push.apply(argArray, ['--setParameter', "enableTestCommands=1"]);
    }
    if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
        // Only add an authenticationMechanisms parameter if one isn't present.
        var hasAuthMechs = false;
        // BUG FIX: the loop variable was an undeclared implicit global `i`
        // (for...in over an array); use a proper indexed loop instead.
        for (var i = 0; i < argArray.length; i++) {
            if (typeof argArray[i] === 'string' &&
                argArray[i].indexOf('authenticationMechanisms') != -1) {
                hasAuthMechs = true;
                break;
            }
        }
        if (!hasAuthMechs) {
            argArray.push.apply(argArray,
                                ['--setParameter',
                                 "authenticationMechanisms=" + jsTest.options().authMechanism]);
        }
    }
    if (jsTest.options().auth) {
        argArray.push.apply(argArray, ['--setParameter', "enableLocalhostAuthBypass=false"]);
    }

    if ( jsTestOptions().useSSL ) {
        if ( argArray.indexOf('--sslMode') < 0 ) {
            argArray.push.apply(argArray, [ '--sslMode', 'requireSSL', '--sslPEMKeyFile', 'jstests/libs/server.pem', '--sslCAFile', 'jstests/libs/ca.pem', '--sslWeakCertificateValidation' ] );
        }
    }

    // mongos only options
    if (programName.endsWith('mongos')) {
        // apply setParameters for mongos
        if (jsTest.options().setParametersMongos) {
            var params = jsTest.options().setParametersMongos.split(",");
            if (params && params.length > 0) {
                params.forEach(function(p) {
                    if (p) argArray.push.apply(argArray, ['--setParameter', p]);
                });
            }
        }
    }
    // mongod only options
    else if (programName.endsWith('mongod')) {
        // set storageEngine for mongod
        if (jsTest.options().storageEngine) {
            argArray.push.apply(argArray, ['--storageEngine', jsTest.options().storageEngine]);
        }
        // apply setParameters for mongod
        if (jsTest.options().setParameters) {
            var params = jsTest.options().setParameters.split(",");
            if (params && params.length > 0) {
                params.forEach(function(p) {
                    if (p) argArray.push.apply(argArray, ['--setParameter', p]);
                });
            }
        }
    }

    return argArray;
};
+
/**
 * Start a mongo process with a particular argument array. If we aren't waiting for connect,
 * return null.
 */
MongoRunner.startWithArgs = function(argArray, waitForConnect) {
    // TODO: Make there only be one codepath for starting mongo processes

    argArray = appendSetParameterArgs(argArray);
    var port = _parsePort.apply(null, argArray);
    var pid = _startMongoProgram.apply(null, argArray);

    if (!waitForConnect) {
        return null;
    }

    var conn = null;
    assert.soon( function() {
        try {
            conn = new Mongo("127.0.0.1:" + port);
            return true;
        } catch( e ) {
            // If the process already died, stop retrying; conn stays null.
            if (!checkProgram(pid)) {
                print("Could not start mongo program at " + port + ", process ended");
                return true;
            }
        }
        return false;
    }, "unable to connect to mongo program on port " + port, 600 * 1000);

    return conn;
}
+
/**
 * DEPRECATED
 *
 * Start mongod or mongos and return a Mongo() object connected to there.
 * This function's first argument is "mongod" or "mongos" program name,
 * and subsequent arguments to this function are passed as
 * command line arguments to the program.
 */
startMongoProgram = function(){
    var port = _parsePort.apply( null, arguments );

    // Enable test commands.
    // TODO: Make this work better with multi-version testing so that we can support
    // enabling this on 2.4 when testing 2.6
    var args = appendSetParameterArgs( argumentsToArray( arguments ) );
    var pid = _startMongoProgram.apply( null, args );

    var m;
    assert.soon( function() {
        try {
            m = new Mongo( "127.0.0.1:" + port );
            return true;
        } catch( e ) {
            // If the process already died, stop retrying and return null.
            if (!checkProgram(pid)) {
                print("Could not start mongo program at " + port + ", process ended");
                m = null;
                return true;
            }
        }
        return false;
    }, "unable to connect to mongo program on port " + port, 600 * 1000 );

    return m;
}
+
// Runs a mongo program to completion, splicing in test-suite-wide auth/SSL
// arguments, and returns the program's result from _runMongoProgram.
runMongoProgram = function() {
    var args = argumentsToArray( arguments );
    var progName = args[0];

    // When auth testing is enabled, insert admin credentials directly after
    // the program name.
    if ( jsTestOptions().auth ) {
        args = args.slice(1);
        args.unshift( progName,
                      '-u', jsTestOptions().authUser,
                      '-p', jsTestOptions().authPassword,
                      '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
                      '--authenticationDatabase=admin'
                    );
    }

    if ( jsTestOptions().useSSL ) {
        args.push("--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHosts");
    }

    // Legacy write-ops mode for the mongo shell binary.
    if (progName == 'mongo' && !_useWriteCommandsDefault()) {
        progName = args[0];
        args = args.slice(1);
        args.unshift(progName, '--useLegacyWriteOps');
    }

    return _runMongoProgram.apply( null, args );
}
+
// Start a mongo program instance. This function's first argument is the
// program name, and subsequent arguments to this function are passed as
// command line arguments to the program. Returns pid of the spawned program.
startMongoProgramNoConnect = function() {
    var args = argumentsToArray( arguments );
    var progName = args[0];

    // When auth testing is enabled, insert admin credentials directly after
    // the program name.
    if ( jsTestOptions().auth ) {
        args = args.slice(1);
        args.unshift(progName,
                     '-u', jsTestOptions().authUser,
                     '-p', jsTestOptions().authPassword,
                     '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
                     '--authenticationDatabase=admin');
    }

    // Legacy write-ops mode for the mongo shell binary.
    if (progName == 'mongo' && !_useWriteCommandsDefault()) {
        args = args.slice(1);
        args.unshift(progName, '--useLegacyWriteOps');
    }

    return _startMongoProgram.apply( null, args );
}
+
// Returns the port of the shell's current connection, or the default 27017
// when the host string carries no explicit port.
myPort = function() {
    var host = db.getMongo().host;
    var portMatch = host.match( /:(.*)/ );
    return portMatch ? portMatch[ 1 ] : 27017;
}
+
+}());
diff --git a/test/legacy28/jstests/libs/servers_misc.js b/test/legacy28/jstests/libs/servers_misc.js
new file mode 100644
index 00000000000..bb7ceae99bc
--- /dev/null
+++ b/test/legacy28/jstests/libs/servers_misc.js
@@ -0,0 +1,357 @@
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on, use allocatePorts(num) to requision
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options include no_bind to not bind_ip to 127.0.0.1
+ * (necessary for replica set testing)
+ */
+MongodRunner = function( port, dbpath, peer, arbiter, extraArgs, options ) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer; // deprecated, kept only for toString()
+ this.arbiter_ = arbiter; // deprecated, kept only for toString()
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ * @return connection to the started mongod
+ */
+MongodRunner.prototype.start = function( reuseData ) {
+ var args = [];
+ if ( reuseData ) {
+ // Restart path goes through startMongoProgram, which presumably expects
+ // the binary name as the first argument — confirm against servers.js.
+ args.push( "mongod" );
+ }
+ args.push( "--port" );
+ args.push( this.port_ );
+ args.push( "--dbpath" );
+ args.push( this.dbpath_ );
+ args.push( "--nohttpinterface" );
+ args.push( "--noprealloc" );
+ args.push( "--smallfiles" );
+ // no_bind is needed for replica set testing (see constructor docs).
+ if (!this.options_.no_bind) {
+ args.push( "--bind_ip" );
+ args.push( "127.0.0.1" );
+ }
+ if ( this.extraArgs_ ) {
+ args = args.concat( this.extraArgs_ );
+ }
+ // Clear any stale lock file left by a previously killed mongod.
+ removeFile( this.dbpath_ + "/mongod.lock" );
+ if ( reuseData ) {
+ return startMongoProgram.apply( null, args );
+ } else {
+ return startMongod.apply( null, args );
+ }
+}
+
+// Port this runner was configured with.
+MongodRunner.prototype.port = function() { return this.port_; }
+
+// Human-readable summary of the constructor arguments.
+MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }
+
+// Harness for testing the mongo tools (import/export/dump/restore...):
+// manages a dedicated mongod, a scratch dbpath and an "external" directory
+// for tool output files.
+ToolTest = function( name, extraOptions ){
+ this.name = name;
+ this.options = extraOptions;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = MongoRunner.dataPath + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ this.useSSL = jsTestOptions().useSSL
+ resetDbpath( this.dbpath );
+ resetDbpath( this.ext );
+}
+
+// Start the harness mongod and return the named collection (or the test DB
+// when no collection name is given). May only be called once per harness.
+ToolTest.prototype.startDB = function( coll ){
+ assert( ! this.m , "db already running" );
+
+ var options = {port : this.port,
+ dbpath : this.dbpath,
+ nohttpinterface : "",
+ noprealloc : "",
+ smallfiles : "",
+ bind_ip : "127.0.0.1"};
+
+ // Caller-supplied options override the defaults above.
+ Object.extend(options, this.options);
+
+ if ( this.useSSL ) {
+ Object.extend(options, { sslMode: "requireSSL", sslPEMKeyFile: "jstests/libs/server.pem", sslCAFile: "jstests/libs/ca.pem", sslWeakCertificateValidation: "" } );
+ }
+
+ this.m = startMongoProgram.apply(null, MongoRunner.arrOptions("mongod", options));
+ this.db = this.m.getDB( this.baseName );
+ if ( coll )
+ return this.db.getCollection( coll );
+ return this.db;
+}
+
+// Stop the harness mongod (no-op if it was never started).
+ToolTest.prototype.stop = function(){
+ if ( ! this.m )
+ return;
+ stopMongod( this.port );
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+}
+
+// Run a tool against the harness mongod; the first argument is the tool
+// suffix ("export" -> "mongoexport"), the rest are passed through. --host
+// is added automatically unless the caller passed --dbpath.
+ToolTest.prototype.runTool = function(){
+ var a = [ "mongo" + arguments[0] ];
+
+ var hasdbpath = false;
+
+ for ( var i=1; i<arguments.length; i++ ){
+ a.push( arguments[i] );
+ if ( arguments[i] == "--dbpath" )
+ hasdbpath = true;
+ }
+
+ if ( this.useSSL ) {
+ a = a.concat(["--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHosts"]);
+ }
+
+ if ( ! hasdbpath ){
+ a.push( "--host" );
+ a.push( "127.0.0.1:" + this.port );
+ }
+
+ return runMongoProgram.apply( null , a );
+}
+
+
+// Harness for legacy master/slave replication tests; manages one master and
+// one slave mongod on two ports.
+ReplTest = function( name, ports ){
+ this.name = name;
+ this.ports = ports || allocatePorts( 2 );
+}
+
+// Port 0 is the master, port 1 the slave.
+ReplTest.prototype.getPort = function( master ){
+ if ( master )
+ return this.ports[ 0 ];
+ return this.ports[ 1 ]
+}
+
+// Per-node dbpath, derived from the test name and node role.
+ReplTest.prototype.getPath = function( master ){
+ var p = MongoRunner.dataPath + this.name + "-";
+ if ( master )
+ p += "master";
+ else
+ p += "slave"
+ return p;
+}
+
+// Build the mongod argument vector for one node, folding in test-run-wide
+// journal/keyFile/SSL/x509 settings and caller extras.
+ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norepl ){
+
+ if ( ! extra )
+ extra = {};
+
+ if ( ! extra.oplogSize )
+ extra.oplogSize = "40";
+
+ var a = []
+ if ( putBinaryFirst )
+ a.push( "mongod" )
+ a.push( "--nohttpinterface", "--noprealloc", "--bind_ip" , "127.0.0.1" , "--smallfiles" );
+
+ a.push( "--port" );
+ a.push( this.getPort( master ) );
+
+ a.push( "--dbpath" );
+ a.push( this.getPath( master ) );
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+ if( jsTestOptions().keyFile ) {
+ a.push( "--keyFile" )
+ a.push( jsTestOptions().keyFile )
+ }
+
+ // NOTE(review): Array.contains is not standard JS — presumably a mongo
+ // shell extension; the guards avoid duplicating flags the caller already
+ // supplied via "extra".
+ if( jsTestOptions().useSSL ) {
+ if (!a.contains("--sslMode")) {
+ a.push( "--sslMode" )
+ a.push( "requireSSL" )
+ }
+ if (!a.contains("--sslPEMKeyFile")) {
+ a.push( "--sslPEMKeyFile" )
+ a.push( "jstests/libs/server.pem" )
+ }
+ if (!a.contains("--sslCAFile")) {
+ a.push( "--sslCAFile" )
+ a.push( "jstests/libs/ca.pem" )
+ }
+ a.push( "--sslWeakCertificateValidation" )
+ }
+ if( jsTestOptions().useX509 && !a.contains("--clusterAuthMode")) {
+ a.push( "--clusterAuthMode" )
+ a.push( "x509" )
+ }
+
+ // Legacy master/slave wiring: slave replicates from the master's port.
+ if ( !norepl ) {
+ if ( master ){
+ a.push( "--master" );
+ }
+ else {
+ a.push( "--slave" );
+ a.push( "--source" );
+ a.push( "127.0.0.1:" + this.ports[0] );
+ }
+ }
+
+ // Append caller extras as --key [value]; logical (non-mongod) options are
+ // filtered out.
+ for ( var k in extra ){
+ var v = extra[k];
+ if( k in MongoRunner.logicalOptions ) continue
+ a.push( "--" + k );
+ if ( v != null )
+ a.push( v );
+ }
+
+ return a;
+}
+
+// Start one node. "restart" reuses the existing dbpath; a fresh start also
+// authenticates the new connection when the test run requires it.
+ReplTest.prototype.start = function( master , options , restart, norepl ){
+ var lockFile = this.getPath( master ) + "/mongod.lock";
+ removeFile( lockFile );
+ var o = this.getOptions( master , options , restart, norepl );
+
+ if (restart) {
+ return startMongoProgram.apply(null, o);
+ } else {
+ var conn = startMongod.apply(null, o);
+ if (jsTestOptions().keyFile || jsTestOptions().auth || jsTestOptions().useX509) {
+ jsTest.authenticate(conn);
+ }
+ return conn;
+ }
+}
+
+// Stop one node (default signal 15 = SIGTERM); with no arguments, stops both
+// master and slave.
+ReplTest.prototype.stop = function( master , signal ){
+ if ( arguments.length == 0 ){
+ this.stop( true );
+ this.stop( false );
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return stopMongod( this.getPort( master ) , signal || 15 );
+}
+
+// Hand out n sequential port numbers starting at startPort (default 31000).
+// Purely arithmetic — it does not check that the ports are actually free.
+allocatePorts = function( n , startPort ) {
+ var ret = [];
+ var start = startPort || 31000;
+ for( var i = start; i < start + n; ++i )
+ ret.push( i );
+ return ret;
+}
+
+
+// Harness that runs three mongods on fixed ports 30000-30002 and a single
+// connection to the comma-joined "sync cluster" URL of all three.
+SyncCCTest = function( testName , extraMongodOptions ){
+ this._testName = testName;
+ this._connections = [];
+
+ for ( var i=0; i<3; i++ ){
+ this._connections.push( startMongodTest( 30000 + i , testName + i , false, extraMongodOptions ) );
+ }
+
+ this.url = this._connections.map( function(z){ return z.name; } ).join( "," );
+ this.conn = new Mongo( this.url );
+}
+
+// Stop all three mongods.
+SyncCCTest.prototype.stop = function(){
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+}
+
+// Assert that all three nodes report the same dbhash md5 for dbname.
+SyncCCTest.prototype.checkHashes = function( dbname , msg ){
+ var hashes = this._connections.map(
+ function(z){
+ return z.getDB( dbname ).runCommand( "dbhash" );
+ }
+ );
+
+ for ( var i=1; i<hashes.length; i++ ){
+ assert.eq( hashes[0].md5 , hashes[i].md5 , "checkHash on " + dbname + " " + msg + "\n" + tojson( hashes ) )
+ }
+}
+
+// Temporarily kill node `num` (default 0); restart it with tempStart.
+SyncCCTest.prototype.tempKill = function( num ){
+ num = num || 0;
+ stopMongod( 30000 + num );
+}
+
+// Restart a node previously stopped by tempKill, reusing its data (the
+// `true` argument presumably means "reuse data" — confirm in servers.js).
+SyncCCTest.prototype.tempStart = function( num ){
+ num = num || 0;
+ this._connections[num] = startMongodTest( 30000 + num , this._testName + num , true );
+}
+
+
+// Launch a second mongo shell running jsCode (a string or a zero-argument
+// function) in parallel with the current test. Returns a join function that
+// blocks until the child shell exits.
+function startParallelShell( jsCode, port, noConnect ){
+ var x;
+
+ var args = ["mongo"];
+
+ // Convert function into call-string: define it under a random name, then
+ // invoke it, so --eval gets a single self-contained statement.
+ if (typeof(jsCode) == "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ }
+ else if(typeof(jsCode) == "string") {}
+ // do nothing
+ else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
+ if (noConnect) {
+ args.push("--nodb");
+ } else if (typeof(db) == "object") {
+ // Point the child's global `db` at the same database as ours.
+ jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
+ }
+
+ // Propagate the test configuration into the child shell.
+ if (TestData) {
+ jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ // Inherit host (and, when not given explicitly, port) from the current
+ // connection.
+ if (typeof db == "object") {
+ var hostAndPort = db.getMongo().host.split(':');
+ var host = hostAndPort[0];
+ args.push("--host", host);
+ if (!port && hostAndPort.length >= 2) {
+ var port = hostAndPort[1];
+ }
+ }
+ if (port) {
+ args.push("--port", port);
+ }
+
+ if( jsTestOptions().useSSL ) {
+ args.push( "--ssl" )
+ args.push( "--sslPEMKeyFile" )
+ args.push( "jstests/libs/client.pem" )
+ args.push( "--sslCAFile" )
+ args.push( "jstests/libs/ca.pem" )
+ }
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function(){
+ waitProgram( x );
+ };
+}
+
+// Flag set by replication suites so shared tests can opt out.
+var testingReplication = false;
+
+// Exit the shell (successfully) when running under a replication suite.
+function skipIfTestingReplication(){
+ if (testingReplication) {
+ print("skipIfTestingReplication skipping");
+ quit(0);
+ }
+}
diff --git a/test/legacy28/jstests/libs/slow_weekly_util.js b/test/legacy28/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..f5f89643f16
--- /dev/null
+++ b/test/legacy28/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+// Harness for the slow-weekly suite: a single mongod on fixed port 30201
+// with timing of the whole test reported at stop().
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+// Database handle on the managed mongod.
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+// Stop the mongod and report elapsed wall-clock time in seconds.
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed successfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
+
diff --git a/test/legacy28/jstests/libs/smoke.pem b/test/legacy28/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..7dddf222386
--- /dev/null
+++ b/test/legacy28/jstests/libs/smoke.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIBCDANBgkqhkiG9w0BAQUFADBrMQ4wDAYDVQQDEwVzbW9r
+ZTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1O
+ZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwHhcN
+MTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBrMQ4wDAYDVQQDEwVzbW9rZTEP
+MA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcg
+WW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb4fOWDomCPyYesh42pQ/bEHdK7r73
+06x1hdku9i+nytCSxhhuITGC1FA4ZIbYdQC/jgfzC0D+SDFKCCyNZA/2Pxam9y3F
+QHrueNtD9bw/OB98D6hC2fCow5OxUqWDkee2hQRTwLKDzec+H72AkwURh8oTfJsl
+LL/1YITZs9kfs59r8HG2YAT7QBbg3xBmK0wZvL4V/FY/OeeR92pIgjUU/6xm/1LU
+bhNHl5JTrXQxPpmvDb1ysiI0mMLeUz7UI+Pe/9mn91dHwgkprWyFi6VnV3/aW7DC
+nW/DklOPD8vMWu2A6iYU0fZbcj4vGM607vst5QLDMoD5Y2ilrKLiTRa5AgMBAAGj
+EDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJc64d76+eyNGX6C
+5r4IdFF3zJjkLs/NcSMReUTEv4zAdJCn7c1FNRkQBS3Ky2CeSGmiyYOhWZ7usv7x
+EvprmHouWsrQXV+o5EIW366e5wzg0c5KWO3oBIRjx4hDkRSQSjJjy5NFrc8fAW9x
+eeaHFWdqk3CHvqBhd32QYEs4+7v8hBYM3PBkj8qghXta4ZZS89cTMSjhu5s4Opje
+qUzGzoHat2VBdYzIpVOorYMFXObwCeQkCAXO5epuGZ0QhML66hc7FuOsW75kI9aW
+QXVoM/z2Gb1wbBYnwHOXtClK783S3RdV0uJun/pVj+VeHb6fyIQRmC5d0eJ0C8mY
+X+acnvA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA2+Hzlg6Jgj8mHrIeNqUP2xB3Su6+99OsdYXZLvYvp8rQksYY
+biExgtRQOGSG2HUAv44H8wtA/kgxSggsjWQP9j8WpvctxUB67njbQ/W8PzgffA+o
+QtnwqMOTsVKlg5HntoUEU8Cyg83nPh+9gJMFEYfKE3ybJSy/9WCE2bPZH7Ofa/Bx
+tmAE+0AW4N8QZitMGby+FfxWPznnkfdqSII1FP+sZv9S1G4TR5eSU610MT6Zrw29
+crIiNJjC3lM+1CPj3v/Zp/dXR8IJKa1shYulZ1d/2luwwp1vw5JTjw/LzFrtgOom
+FNH2W3I+LxjOtO77LeUCwzKA+WNopayi4k0WuQIDAQABAoIBAQDRFgAaDcLGfqQS
+Bk/iqHz2U6cMMxCW+sqAioGmPWW9iYdiOkra1meNP7T0mur7A+9tN3LpsybfZeiw
+vCsZXDAteXph1KPKcPE0uOnPqumRuB2ATCc1Qqas5CUaNju7a8/J6Jzfw1o9KVud
+4HLDw4nLTLNkalXhOLdkbp6FoZZypAgc8OnSdw7z9Kri6VndkddX3fWv4t203XwT
+AvBxvy4Qfblz6VKYRnjj2CPvo/kD+ncFEg+S6u8/LkghTX7CYeMHdTC0P9jOcEK2
+PMm3kS3sX7VkypsAirYK5QtBWxur+mINxfOBDtRlA2RaJQnikRiGb14bMkLx8Liy
+JNjEHSLdAoGBAP9+KpjniozZIbrcS79wdRrW+ARyDp1Plzyd4nQxfWmQ//nsnK5T
+EYCFXWTR/ldkAoHpD+bGGU02p1+1u4vmWqw/x+Qy56Gh/eylhe0RvYEjkVLyreuc
+bXu0BFlKVgRlBq1ZyXnr2lz3bAIZxvZs13lZn6qVPMt7w2/JTCal9jw7AoGBANxR
+sGik9mq/678nzLiNlf/LcwIz7siuyISoWDOaVEVva0uorqctVqL95w0f+3FXqBO/
+5BiJRFo5D8SfzRjkNkJ7V+rm+7/CjtsjEw2Ue+ZJYPlm+Wr545GYmhU9QH9NLZIN
+JBwTVWjLgdsyQyi0Gc+xMraBwEwoyS8cO17uHO2bAoGBANRmO91/6BPt0ve4epR9
+Vi1o9yki9PlcmHtBOmikWAFyFQvd4+eckVlKBflyBkL6locPjTOqDpC9VengeDj2
+2PyHzZLtqtkZhbK9bJhIfkWknwTZUTMliXMkldTxUo82uZVVpoRgSdmtq7IXYeut
+UnjExFMY3EDB9BizvUYIBKvPAoGAViQ6bS/SiPpxGlRdXus88r6BQSM9AYoVLIkF
+s2dr+5oMwZA6eXLopOHRLPiMP0yekto8PLuu1ffpil9QuaLA9E11moqlc9yGLngQ
+QwcDSo72M41nh8Qcjhi0ZgmE5kEuyCQLMk783fRz2VhVmdyRGvuVcHZa0WxA/QJ0
+1DEVbnECgYEA3i2PGHUvU2TIFNvubw3qdH5y7FXafF+O0ulQ8e6r/CbVAG14Z6xP
+RHLc7/JIYK9CG1PWCbkjiHZ4MsKFuRWFrUMrwSj8M3euCaEIxa/Co60qQ/CnZiZ6
+geleTtUcTZ2T0pqGLnrHwlzhLpCkPJPyjcfQjjEZRwd0bVFX6b3C/rw=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/test/legacy28/jstests/libs/test_background_ops.js b/test/legacy28/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..b3f6f593947
--- /dev/null
+++ b/test/legacy28/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities related to background operations while other operations are working
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+// Acquire a named cooperative lock stored in config.testLocks. The lock is
+// taken by CASing state 0 -> 1 with our ObjectId as the ts; returns an
+// object whose unlock() releases it (matching on our ts so only the holder
+// can release).
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ // Upsert the lock document in the unlocked state if it doesn't exist yet.
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ // After 20s of contention, start dumping diagnostics each retry.
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+/**
+ * Allows a test or background op to say it's finished. Finished state is a
+ * marker document in config.testFinished; omitting `finished` means true.
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished (marker doc present).
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op (upserted into config.testResult).
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op, or null if none was recorded.
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+/**
+ * Overrides the parallel shell code in mongo. Simpler than the servers_misc
+ * version: no TestData propagation or SSL flags, just host/port plumbing.
+ * Returns a join function that blocks until the child shell exits.
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+// Run `proc(args...)` in a parallel shell against `mongo`, shipping the
+// function, its args, and helper contexts through a config.parallelTest
+// document and rehydrating them with eval() in the child. Returns a join
+// function (with isFinished/setFinished/waitForLock attached) that signals
+// the op to stop, waits for it, and returns/throws its recorded result.
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ // Derive a per-run random seed from a reversed ObjectId timestamp.
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ // Runs inside the child shell: rebinds the sync helpers
+ // to curried versions targeting this op's name.
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ // Runs inside the child shell: rehydrates contexts, operation, and args
+ // from their tojson'd forms, runs the op, and records result or error.
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ // Functions must be shipped as source (tojson) since documents cannot
+ // hold live closures.
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ // Minimal stub eval'd by the child shell: fetch the stored doc and run
+ // its bootstrapper.
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ // Temporarily repoint the global db so the parallel shell connects to the
+ // target cluster, then restore it.
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw Error("Error in parallel ops " + procName + " : "
+ + tojson( result.err ) )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
+
+// Context installed in parallel-op shells: seeds Random from the op's seed
+// and augments it with helpers for random ints, shard keys, and clusters.
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ // randInt(max) -> [0, max); randInt(min, max) -> [min, min + max)
+ // NOTE(review): the two-arg form does not subtract min, so the upper
+ // bound is min + max rather than max — confirm callers expect this.
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * max )
+ }
+
+ // Ascending compound key on fields "a", "b", ... (currently fixed at 2).
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ // Random value document matching the given shard key pattern.
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ // Sharded cluster with (currently) fixed shape: 2 shards, 4 mongos.
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+// Background op: repeatedly moveChunk random shard-key values of collName to
+// random shards (1s apart) until the parallel-ops harness signals finished.
+// Errors are printed and ignored — chunk moves are expected to race.
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ // isFinished() here is the curried per-op version bound by the
+ // parallel-ops bootstrap (see test_background_ops setup).
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+// Background op: repeatedly split collName at random shard-key values (1s
+// apart) until signaled finished. Errors are printed and ignored.
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
+
diff --git a/test/legacy28/jstests/libs/testconfig b/test/legacy28/jstests/libs/testconfig
new file mode 100644
index 00000000000..4b09f37ad13
--- /dev/null
+++ b/test/legacy28/jstests/libs/testconfig
@@ -0,0 +1,6 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
+help = false
+sysinfo = false
diff --git a/test/legacy28/jstests/libs/testconfig.json b/test/legacy28/jstests/libs/testconfig.json
new file mode 100644
index 00000000000..5af32aad7d3
--- /dev/null
+++ b/test/legacy28/jstests/libs/testconfig.json
@@ -0,0 +1,4 @@
+{
+ "fastsync" : true,
+ "version" : false
+}
diff --git a/test/legacy28/jstests/libs/trace_missing_docs.js b/test/legacy28/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/test/legacy28/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On error inserting documents, traces back and shows where the document was dropped
+//
+
+// Searches every shard's oplog for insert/update/delete entries matching the
+// given doc (by shard key, falling back to _id) and prints them in timestamp
+// order. Returns the sorted array of matching oplog entries.
+function traceMissingDoc( coll, doc, mongos ) {
+
+ // Accept either a mongos connection + namespace string, or a collection
+ // object from which the connection is derived.
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ // Prefer the replica-set oplog; fall back to the master/slave oplog.
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ // Qualify shard-key fields with the oplog subdocument prefix
+ // ('o' for inserts, 'o2' for update match docs).
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ // Order entries across shards by oplog timestamp.
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+} \ No newline at end of file
diff --git a/test/legacy28/jstests/replsets/rslib.js b/test/legacy28/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..8b7d2ed1263
--- /dev/null
+++ b/test/legacy28/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+// Shared counters for occasionally() and wait() diagnostics.
+var count = 0;
+var w = 0;
+
+// Poll f() once per second until it returns truthy; asserts (with msg) after
+// 200 attempts. Prints the predicate source on the 4th try for diagnostics.
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+// Wait until `a` (a connection or a DB) is usable again, rebinding the
+// global `db`; re-authenticates when the test run uses keyFile/x509.
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+// Most recent entry in the server's replica-set oplog, or null if empty.
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+// Block until every replica-set member reports state PRIMARY (1),
+// SECONDARY (2), or ARBITER (7); default timeout 60s.
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+// Apply a replSetReconfig and wait for all members to settle; errors from
+// the (expected) reconfig-induced disconnect are printed and ignored.
+// Returns the admin DB of the post-reconfig master.
+var reconfig = function(rs, config) {
+ var admin = rs.getMaster().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getMaster().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/test/legacy28/jstests/tool/csv1.js b/test/legacy28/jstests/tool/csv1.js
new file mode 100644
index 00000000000..e95d8aa8b41
--- /dev/null
+++ b/test/legacy28/jstests/tool/csv1.js
@@ -0,0 +1,43 @@
+// csv1.js
+
+
+// Round-trips one document through mongoexport/mongoimport in CSV form,
+// both with an explicit field list and with --headerline.
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+// Without --headerline the CSV header row is imported as a document too,
+// hence a count of 2.
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+// a[1] is the imported header row; a[0] is the real document.
+assert.docEq( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"}, a[1], "csv parse 1" );
+assert.docEq( base, a[0], "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+// With --headerline only the real document should come back.
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.docEq( base, x, "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/test/legacy28/jstests/tool/csvexport1.js b/test/legacy28/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/test/legacy28/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+// Exercises CSV export of a variety of BSON types (ObjectId, arrays,
+// subdocuments, MinKey, BinData, ISODate, Timestamp, regex, code) and
+// verifies the stringified forms after re-import.
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+// Compare field-by-field; object-valued fields were exported as JSON text.
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+ // For fields which contain arrays or objects, they have been
+ // exported as JSON - parse the JSON in the output and verify
+ // that it matches the original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/test/legacy28/jstests/tool/csvexport2.js b/test/legacy28/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..2dc87b3c641
--- /dev/null
+++ b/test/legacy28/jstests/tool/csvexport2.js
@@ -0,0 +1,32 @@
+// csvexport2.js
+
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// therefore this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop()
diff --git a/test/legacy28/jstests/tool/csvimport1.js b/test/legacy28/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..87320afec87
--- /dev/null
+++ b/test/legacy28/jstests/tool/csvimport1.js
@@ -0,0 +1,41 @@
+// csvimport1.js
+
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.docEq( base[i], a[i], "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.docEq( base[i], x[i], "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/test/legacy28/jstests/tool/data/a.tsv b/test/legacy28/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/test/legacy28/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/test/legacy28/jstests/tool/data/csvimport1.csv b/test/legacy28/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/test/legacy28/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/test/legacy28/jstests/tool/data/dumprestore6/foo.bson b/test/legacy28/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 00000000000..b8f8f99e6bf
--- /dev/null
+++ b/test/legacy28/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson b/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 00000000000..dde25da302a
--- /dev/null
+++ b/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/test/legacy28/jstests/tool/dumpauth.js b/test/legacy28/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..baedda58a75
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumpauth.js
@@ -0,0 +1,39 @@
+// dumpauth.js
+// test mongodump with authentication
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+t = db[ baseName ];
+t.drop();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
+
+// SERVER-5233: mongodump with authentication breaks when using "--out -"
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol",
+ "--out", "-" );
+assert.eq(x, 0, "mongodump should succeed with authentication while using '--out'");
diff --git a/test/legacy28/jstests/tool/dumpfilename1.js b/test/legacy28/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..38b430896bf
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumpfilename1.js
@@ -0,0 +1,13 @@
+//dumpfilename1.js
+
+//Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+assert.writeOK(c.getCollection("df/").insert({ a: 3 }));
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/test/legacy28/jstests/tool/dumprestore1.js b/test/legacy28/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..a0f6f844d9e
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore1.js
@@ -0,0 +1,32 @@
+// dumprestore1.js
+
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
+
+// Ensure that --db and --collection are provided when filename is "-" (stdin).
+ret = t.runTool( "restore" , "--collection" , "coll", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
+ret = t.runTool( "restore" , "--db" , "db", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore10.js b/test/legacy28/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..b4f029fdefa
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore10.js
@@ -0,0 +1,64 @@
+// simple test to ensure write concern functions as expected
+
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/test/legacy28/jstests/tool/dumprestore3.js b/test/legacy28/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..4bf60bf3cac
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore3.js
@@ -0,0 +1,61 @@
+// dumprestore3.js
+
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, _isWindows() ? -1 : 255, "mongorestore should exit w/ -1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, _isWindows() ? -1 : 255, "mongoreimport should exit w/ -1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/test/legacy28/jstests/tool/dumprestore4.js b/test/legacy28/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..61e7d33213a
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore4.js
@@ -0,0 +1,43 @@
+// dumprestore4.js -- see SERVER-2186
+
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore6.js b/test/legacy28/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..44135a37579
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore6.js
@@ -0,0 +1,28 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore7.js b/test/legacy28/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..2c9e6560f94
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore7.js
@@ -0,0 +1,66 @@
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getMaster().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = 9;
+x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/test/legacy28/jstests/tool/dumprestore8.js b/test/legacy28/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..239c15a701a
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore8.js
@@ -0,0 +1,106 @@
+// dumprestore8.js
+
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" );
+
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" );
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore9.js b/test/legacy28/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..cef9a623cf1
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, { chunksize : 1, enableBalancer : 1 } );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+} \ No newline at end of file
diff --git a/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js b/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..3f6360168b0
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,112 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+// We turn this off to prevent the server from touching the 'options' field in system.namespaces.
+// This is important because we check exact values of the 'options' field in this test.
+db.adminCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( {}, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( {}, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert.eq( {}, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore_auth.js b/test/legacy28/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..a2de1f983f5
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,99 @@
+// dumprestore_auth.js
+
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+var dbName = c.getDB().toString();
+print("DB is ",dbName);
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+// Add user defined roles & users with those roles
+var testUserAdmin = c.getDB().getSiblingDB(dbName);
+var backupActions = ["find","listCollections", "listIndexes"];
+testUserAdmin.createRole({role: "backupFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:backupActions},
+ {resource: {db: dbName, collection: "system.indexes"},
+ actions: backupActions},
+ {resource: {db: dbName, collection: "" },
+ actions: backupActions},
+ {resource: {db: dbName, collection: "system.namespaces"},
+ actions: backupActions}],
+ roles: []});
+testUserAdmin.createUser({user: 'backupFoo', pwd: 'password', roles: ['backupFoo']});
+
+var restoreActions = ["collMod", "createCollection","createIndex","dropCollection","insert"];
+var restoreActionsFind = restoreActions;
+restoreActionsFind.push("find");
+testUserAdmin.createRole({role: "restoreChester",
+ privileges: [{resource: {db: dbName, collection: "chester"}, actions: restoreActions},
+ {resource: {db: dbName, collection: "system.indexes"},
+ actions: restoreActions},
+ {resource: {db: dbName, collection: "system.namespaces"},
+ actions: restoreActionsFind}],
+ roles: []});
+testUserAdmin.createRole({role: "restoreFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:restoreActions},
+ {resource: {db: dbName, collection: "system.indexes"},
+ actions: restoreActions},
+ {resource: {db: dbName, collection: "system.namespaces"},
+ actions: restoreActionsFind}],
+ roles: []});
+testUserAdmin.createUser({user: 'restoreChester', pwd: 'password', roles: ['restoreChester']});
+testUserAdmin.createUser({user: 'restoreFoo', pwd: 'password', roles: ['restoreFoo']});
+
+var sysUsers = adminDB.system.users.count();
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+assert.eq(0, c.getDB().system.namespaces.findOne(
+{name: c.getFullName()}).options.flags, "find namespaces 1");
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore should fail without user & pass
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern" ,"0");
+assert.eq(0 , c.count() , "after restore without auth");
+
+// Restore should pass with authorized user
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+assert.eq(0, c.getDB().system.namespaces.findOne(
+{name: c.getFullName()}).options.flags, "find namespaces 2");
+assert.eq(sysUsers, adminDB.system.users.count());
+
+// Dump & restore DB/collection with user-defined roles
+t.runTool("dump" , "--out" , t.ext, "--username", "backupFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore with wrong user
+t.runTool("restore" , "--username", "restoreChester", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.eq(0 , c.count() , "after restore with wrong user");
+
+// Restore with proper user
+t.runTool("restore" , "--username", "restoreFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 3");
+assert.eq(22 , c.findOne().a , "after restore 3");
+assert.eq(0, c.getDB().system.namespaces.findOne(
+{name: c.getFullName()}).options.flags, "find namespaces 3");
+assert.eq(sysUsers, adminDB.system.users.count());
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/dumprestore_auth2.js b/test/legacy28/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..0392d1be3db
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,98 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.roles.getIndexes().length, "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop(); \ No newline at end of file
diff --git a/test/legacy28/jstests/tool/dumprestore_auth3.js b/test/legacy28/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..f65bed7abff
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,200 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod); \ No newline at end of file
diff --git a/test/legacy28/jstests/tool/dumprestore_excludecollections.js b/test/legacy28/jstests/tool/dumprestore_excludecollections.js
new file mode 100644
index 00000000000..dcfab742053
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumprestore_excludecollections.js
@@ -0,0 +1,112 @@
+// Tests for mongodump options for excluding collections
+
+
+var testBaseName = "jstests_tool_dumprestore_excludecollections";
+
+var dumpDir = MongoRunner.dataPath + testBaseName + "_dump_external/";
+
+var mongodSource = MongoRunner.runMongod();
+var sourceDB = mongodSource.getDB(testBaseName);
+var mongodDest = MongoRunner.runMongod();
+var destDB = mongodDest.getDB(testBaseName);
+
+jsTest.log("Inserting documents into source mongod");
+sourceDB.test.insert({x:1});
+sourceDB.test2.insert({x:2});
+sourceDB.test3.insert({x:3});
+sourceDB.foo.insert({f:1});
+sourceDB.foo2.insert({f:2});
+
+jsTest.log("Testing incompabible option combinations");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection but no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and --collection");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix but " +
+ "no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix and " +
+ "--collection");
+
+jsTest.log("Testing proper behavior of collection exclusion");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test2.findOne().x, 2, "Wrong value in document");
+assert.eq(destDB.test3.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test3.findOne().x, 3, "Wrong value in document");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+// The --excludeCollection and --excludeCollectionsWithPrefix options can be specified multiple
+// times, but that is not tested here because right now MongoRunners can only be configured using
+// javascript objects which do not allow duplicate keys. See SERVER-14220.
+
+MongoRunner.stopMongod(mongodDest.port);
+MongoRunner.stopMongod(mongodSource.port);
+
+print(testBaseName + " success!");
diff --git a/test/legacy28/jstests/tool/dumpsecondary.js b/test/legacy28/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..68a81210c12
--- /dev/null
+++ b/test/legacy28/jstests/tool/dumpsecondary.js
@@ -0,0 +1,39 @@
+
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/test/legacy28/jstests/tool/exportimport1.js b/test/legacy28/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..5e206d8c40b
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport1.js
@@ -0,0 +1,67 @@
+// exportimport1.js
+
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/exportimport3.js b/test/legacy28/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..4f0fdd46609
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport3.js
@@ -0,0 +1,28 @@
+// exportimport3.js
+
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/exportimport4.js b/test/legacy28/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport4.js
@@ -0,0 +1,57 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/exportimport5.js b/test/legacy28/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport5.js
@@ -0,0 +1,82 @@
+// exportimport5.js
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/exportimport6.js b/test/legacy28/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a6406dfa880
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport6.js
@@ -0,0 +1,27 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1})
+c.save({a:1, b:2})
+c.save({a:2, b:3})
+c.save({a:2, b:3})
+c.save({a:3, b:4})
+c.save({a:3, b:5})
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, printjson(c.findOne()));
+
+t.stop();
diff --git a/test/legacy28/jstests/tool/exportimport_bigarray.js b/test/legacy28/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..e8bd4a468b4
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,59 @@
+// Test importing collections represented as a single line array above the maximum document size
+
+
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
+
+print('Size of one document: ' + docSize)
+print('Number of documents to exceed maximum BSON size: ' + numDocs)
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+var bulk = src.initializeUnorderedBulkOp();
+for (i = 0; i < numDocs; ++i) {
+ bulk.insert({ x: bigString });
+}
+assert.writeOK(bulk.execute());
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/test/legacy28/jstests/tool/exportimport_date.js b/test/legacy28/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..9dc6c275a96
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport_date.js
@@ -0,0 +1,50 @@
+
+var tt = new ToolTest('exportimport_date_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Insert a date that we can format
+var formatable = ISODate("1970-01-02T05:00:00Z");
+assert.eq(formatable.valueOf(), 104400000);
+src.insert({ "_id" : formatable });
+
+// Insert a date that we cannot format as an ISODate string
+var nonformatable = ISODate("3001-01-01T00:00:00Z");
+assert.eq(nonformatable.valueOf(), 32535216000000);
+src.insert({ "_id" : nonformatable });
+
+// Verify number of documents inserted
+assert.eq(2, src.find().itcount());
+
+data = 'data/exportimport_date_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js b/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js
new file mode 100644
index 00000000000..a4705dc3ceb
--- /dev/null
+++ b/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js
@@ -0,0 +1,38 @@
+
+var tt = new ToolTest('exportimport_minkey_maxkey_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+src.insert({ "_id" : MaxKey });
+src.insert({ "_id" : MinKey });
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('export', '--out' , tt.extFile, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('import', '--file', tt.extFile, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/test/legacy28/jstests/tool/files1.js b/test/legacy28/jstests/tool/files1.js
new file mode 100644
index 00000000000..3db783df19f
--- /dev/null
+++ b/test/legacy28/jstests/tool/files1.js
@@ -0,0 +1,28 @@
+// files1.js
+
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/test/legacy28/jstests/tool/oplog1.js b/test/legacy28/jstests/tool/oplog1.js
new file mode 100644
index 00000000000..e908b5efd59
--- /dev/null
+++ b/test/legacy28/jstests/tool/oplog1.js
@@ -0,0 +1,27 @@
+// oplog1.js
+
+
+// very basic test for mongooplog
+// need a lot more, but test that it functions at all
+
+t = new ToolTest( "oplog1" );
+
+db = t.startDB();
+
+output = db.output
+
+doc = { _id : 5 , x : 17 };
+
+db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );
+
+assert.eq( 0 , output.count() , "before" )
+
+t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );
+
+assert.eq( 1 , output.count() , "after" );
+
+assert.eq( doc , output.findOne() , "after check" );
+
+t.stop();
+
+
diff --git a/test/legacy28/jstests/tool/oplog_all_ops.js b/test/legacy28/jstests/tool/oplog_all_ops.js
new file mode 100644
index 00000000000..a0eb3e34dc9
--- /dev/null
+++ b/test/legacy28/jstests/tool/oplog_all_ops.js
@@ -0,0 +1,62 @@
+/**
+ * Performs a simple test on mongooplog by doing different types of operations
+ * that will show up in the oplog then replaying it on another replica set.
+ * Correctness is verified using the dbhash command.
+ */
+
+
+var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl1.startSet({ oplogSize: 10 });
+repl1.initiate();
+repl1.awaitSecondaryNodes();
+
+var repl1Conn = new Mongo(repl1.getURL());
+var testDB = repl1Conn.getDB('test');
+var testColl = testDB.user;
+
+// op i
+testColl.insert({ x: 1 });
+testColl.insert({ x: 2 });
+
+// op c
+testDB.dropDatabase();
+
+testColl.insert({ y: 1 });
+testColl.insert({ y: 2 });
+testColl.insert({ y: 3 });
+
+// op u
+testColl.update({}, { $inc: { z: 1 }}, true, true);
+
+// op d
+testColl.remove({ y: 2 });
+
+// op n
+var oplogColl = repl1Conn.getCollection('local.oplog.rs');
+oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});
+
+var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl2.startSet({ oplogSize: 10 });
+repl2.initiate();
+repl2.awaitSecondaryNodes();
+
+var srcConn = repl1.getPrimary();
+runMongoProgram('mongooplog', '--from', repl1.getPrimary().host,
+ '--host', repl2.getPrimary().host);
+
+var repl1Hash = testDB.runCommand({ dbhash: 1 });
+
+var repl2Conn = new Mongo(repl2.getURL());
+var testDB2 = repl2Conn.getDB(testDB.getName());
+var repl2Hash = testDB2.runCommand({ dbhash: 1 });
+
+assert(repl1Hash.md5);
+assert.eq(repl1Hash.md5, repl2Hash.md5);
+
+repl1.stopSet();
+repl2.stopSet();
+
diff --git a/test/legacy28/jstests/tool/restorewithauth.js b/test/legacy28/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..ce1a62aad49
--- /dev/null
+++ b/test/legacy28/jstests/tool/restorewithauth.js
@@ -0,0 +1,114 @@
+/* SERVER-4972
+ * Test for mongorestore on server with --auth allows restore without credentials of colls
+ * with no index
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add admin user to kick authentication
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 )
+
+// make sure it has no index except _id
+assert.eq(foo.system.indexes.count(), 2);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.system.indexes.count(), 3);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0);
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+// make sure that the collection is empty
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.system.indexes.count(), 3); // _id on bar, _id on baz, x on bar
+
+stopMongod( port );
diff --git a/test/legacy28/jstests/tool/stat1.js b/test/legacy28/jstests/tool/stat1.js
new file mode 100644
index 00000000000..80ead9f7dfd
--- /dev/null
+++ b/test/legacy28/jstests/tool/stat1.js
@@ -0,0 +1,18 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1" );
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1" );
+assert.eq(x, _isWindows() ? -1 : 255, "mongostat should exit with -1 with eliot:wrong");
diff --git a/test/legacy28/jstests/tool/tool1.js b/test/legacy28/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/test/legacy28/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
+// mongo tool tests, very basic to start with
+
+
+baseName = "jstests_tool_tool1";
+dbPath = MongoRunner.dataPath + baseName + "/";
+externalPath = MongoRunner.dataPath + baseName + "_external/";
+externalBaseName = "export.json";
+externalFile = externalPath + externalBaseName;
+
+function fileSize(){
+ var l = listFiles( externalPath );
+ for ( var i=0; i<l.length; i++ ){
+ if ( l[i].baseName == externalBaseName )
+ return l[i].size;
+ }
+ return -1;
+}
+
+
+port = allocatePorts( 1 )[ 0 ];
+resetDbpath( externalPath );
+
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
+c = m.getDB( baseName ).getCollection( baseName );
+c.save( { a: 1 } );
+assert( c.findOne() );
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
+c.drop();
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
+assert( c.findOne() , "mongodump then restore has no data" );
+assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
+
+resetDbpath( externalPath );
+
+assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
+assert.lt( 10 , fileSize() , "file size changed" );
+
+c.drop();
+runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
+assert.soon( "c.findOne()" , "mongo import json A" );
+assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/test/legacy28/jstests/tool/tool_replset.js b/test/legacy28/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..62e1dba8c62
--- /dev/null
+++ b/test/legacy28/jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+ * 9. Add data to the oplog.rs collection.
+ * 10. Ensure that the document doesn't exist yet.
+ * 11. Now play the mongooplog tool.
+ * 12. Make sure that the oplog was played
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+// Test with mongooplog
+var doc = { _id : 5, x : 17 };
+master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
+ "o" : doc, "v" : NumberInt(2) });
+
+assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
+ "was not 100 as expected");
+
+runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
+ "--host", replSetConnString);
+
+print("running mongooplog to replay the oplog")
+
+assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
+ "was not 101 as expected")
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/test/legacy28/jstests/tool/tsv1.js b/test/legacy28/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..677bec2af9c
--- /dev/null
+++ b/test/legacy28/jstests/tool/tsv1.js
@@ -0,0 +1,33 @@
+// tsv1.js
+
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.docEq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.docEq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.docEq( base , x , "tsv parse 2" )
+
+
+
+t.stop()