diff options
Diffstat (limited to 'src/mongo/gotools/test/legacy26')
93 files changed, 6216 insertions, 0 deletions
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py b/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py new file mode 100644 index 00000000000..a31b3e2dfa1 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py @@ -0,0 +1,480 @@ +""" +buildlogger.py + +Wrap a command (specified on the command line invocation of buildlogger.py) +and send output in batches to the buildlogs web application via HTTP POST. + +The script configures itself from environment variables: + + required env vars: + MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit") + MONGO_BUILD_NUMBER (an integer) + MONGO_TEST_FILENAME (not required when invoked with -g) + + optional env vars: + MONGO_PHASE (e.g. "core", "slow nightly", etc) + MONGO_* (any other environment vars are passed to the web app) + BUILDLOGGER_CREDENTIALS (see below) + +This script has two modes: a "test" mode, intended to wrap the invocation of +an individual test file, and a "global" mode, intended to wrap the mongod +instances that run throughout the duration of a mongo test phase (the logs +from "global" invocations are displayed interspersed with the logs of each +test, in order to let the buildlogs web app display the full output sensibly.) + +If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a +path to a valid Python file containing "username" and "password" variables, +which should be valid credentials for authenticating to the buildlogger web +app. For example: + + username = "hello" + password = "world" + +If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory +and the directories one, two, and three levels up, are searched, in that +order. 
+""" + +import functools +import os +import os.path +import re +import signal +import socket +import subprocess +import sys +import time +import traceback +import urllib2 +import utils + +# suppress deprecation warnings that happen when +# we import the 'buildbot.tac' file below +import warnings +warnings.simplefilter('ignore', DeprecationWarning) + +try: + import json +except: + try: + import simplejson as json + except: + json = None + +# try to load the shared secret from settings.py +# which will be one, two, or three directories up +# from this file's location +credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac') +credentials_loc, credentials_name = os.path.split(credentials_file) +if not credentials_loc: + here = os.path.abspath(os.path.dirname(__file__)) + possible_paths = [ + os.path.abspath(os.path.join(here, '..')), + os.path.abspath(os.path.join(here, '..', '..')), + os.path.abspath(os.path.join(here, '..', '..', '..')), + ] +else: + possible_paths = [credentials_loc] + +username, password = None, None +for path in possible_paths: + credentials_path = os.path.join(path, credentials_name) + if os.path.isfile(credentials_path): + credentials = {} + try: + execfile(credentials_path, credentials, credentials) + username = credentials.get('slavename', credentials.get('username')) + password = credentials.get('passwd', credentials.get('password')) + break + except: + pass + + +URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/') +TIMEOUT_SECONDS = 10 +socket.setdefaulttimeout(TIMEOUT_SECONDS) + +digest_handler = urllib2.HTTPDigestAuthHandler() +digest_handler.add_password( + realm='buildlogs', + uri=URL_ROOT, + user=username, + passwd=password) + +# This version of HTTPErrorProcessor is copied from +# Python 2.7, and allows REST response codes (e.g. +# "201 Created") which are treated as errors by +# older versions. 
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor): + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + +url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor()) + +def url(endpoint): + if not endpoint.endswith('/'): + endpoint = '%s/' % endpoint + + return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint) + +def post(endpoint, data, headers=None): + data = json.dumps(data, encoding='utf-8') + + headers = headers or {} + headers.update({'Content-Type': 'application/json; charset=utf-8'}) + + req = urllib2.Request(url=url(endpoint), data=data, headers=headers) + try: + response = url_opener.open(req) + except urllib2.URLError: + import traceback + traceback.print_exc(file=sys.stderr) + sys.stderr.flush() + # indicate that the request did not succeed + return None + + response_headers = dict(response.info()) + + # eg "Content-Type: application/json; charset=utf-8" + content_type = response_headers.get('content-type') + match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type) + if match and match.group('mimetype') == 'application/json': + encoding = match.group('charset') or 'utf-8' + return json.load(response, encoding=encoding) + + return response.read() + +def traceback_to_stderr(func): + """ + decorator which logs any exceptions encountered to stderr + and returns none. 
+ """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except urllib2.HTTPError, err: + sys.stderr.write('error: HTTP code %d\n----\n' % err.code) + if hasattr(err, 'hdrs'): + for k, v in err.hdrs.items(): + sys.stderr.write("%s: %s\n" % (k, v)) + sys.stderr.write('\n') + sys.stderr.write(err.read()) + sys.stderr.write('\n----\n') + sys.stderr.flush() + except: + sys.stderr.write('Traceback from buildlogger:\n') + traceback.print_exc(file=sys.stderr) + sys.stderr.flush() + return None + return wrapper + + +@traceback_to_stderr +def get_or_create_build(builder, buildnum, extra={}): + data = {'builder': builder, 'buildnum': buildnum} + data.update(extra) + response = post('build', data) + if response is None: + return None + return response['id'] + +@traceback_to_stderr +def create_test(build_id, test_filename, test_command, test_phase): + response = post('build/%s/test' % build_id, { + 'test_filename': test_filename, + 'command': test_command, + 'phase': test_phase, + }) + if response is None: + return None + return response['id'] + +@traceback_to_stderr +def append_test_logs(build_id, test_id, log_lines): + response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines) + if response is None: + return False + return True + +@traceback_to_stderr +def append_global_logs(build_id, log_lines): + """ + "global" logs are for the mongod(s) started by smoke.py + that last the duration of a test phase -- since there + may be output in here that is important but spans individual + tests, the buildlogs webapp handles these logs specially. 
+ """ + response = post('build/%s' % build_id, data=log_lines) + if response is None: + return False + return True + +@traceback_to_stderr +def finish_test(build_id, test_id, failed=False): + response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={ + 'X-Sendlogs-Test-Done': 'true', + 'X-Sendlogs-Test-Failed': failed and 'true' or 'false', + }) + if response is None: + return False + return True + +def run_and_echo(command): + """ + this just calls the command, and returns its return code, + allowing stdout and stderr to work as normal. it is used + as a fallback when environment variables or python + dependencies cannot be configured, or when the logging + webapp is unavailable, etc + """ + proc = subprocess.Popen(command) + + def handle_sigterm(signum, frame): + try: + proc.send_signal(signum) + except AttributeError: + os.kill(proc.pid, signum) + orig_handler = signal.signal(signal.SIGTERM, handle_sigterm) + + proc.wait() + + signal.signal(signal.SIGTERM, orig_handler) + return proc.returncode + +class LogAppender(object): + def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10): + self.callback = callback + self.callback_args = args + + self.send_after_lines = send_after_lines + self.send_after_seconds = send_after_seconds + + self.buf = [] + self.retrybuf = [] + self.last_sent = time.time() + + def __call__(self, line): + self.buf.append((time.time(), line)) + + delay = time.time() - self.last_sent + if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds: + self.submit() + + # no return value is expected + + def submit(self): + if len(self.buf) + len(self.retrybuf) == 0: + return True + + args = list(self.callback_args) + args.append(list(self.buf) + self.retrybuf) + + self.last_sent = time.time() + + if self.callback(*args): + self.buf = [] + self.retrybuf = [] + return True + else: + self.retrybuf += self.buf + self.buf = [] + return False + + +def wrap_test(command): + """ + call the given 
command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+    buildlogger webapp
+    """
+
+    # get builder name and build number from environment
+    builder = os.environ.get('MONGO_BUILDER_NAME')
+    buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+    if builder is None or buildnum is None:
+        return run_and_echo(command)
+
+    try:
+        buildnum = int(buildnum)
+    except ValueError:
+        sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+        sys.stderr.flush()
+        return run_and_echo(command)
+
+    # test takes some extra info
+    phase = os.environ.get('MONGO_PHASE', 'unknown')
+    test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+    build_info.pop('MONGO_BUILDER_NAME', None)
+    build_info.pop('MONGO_BUILD_NUMBER', None)
+    build_info.pop('MONGO_PHASE', None)
+    build_info.pop('MONGO_TEST_FILENAME', None)
+
+    build_id = get_or_create_build(builder, buildnum, extra=build_info)
+    if not build_id:
+        return run_and_echo(command)
+
+    test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+    if not test_id:
+        return run_and_echo(command)
+
+    # the peculiar formatting here matches what is printed by
+    # smoke.py when starting tests
+    output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+    sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+    sys.stdout.flush()
+
+    callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+    returncode = loop_and_callback(command, callback)
+    failed = bool(returncode != 0)
+
+    # this will append any remaining unsubmitted logs, or
+    # return True if there are none left to submit
+    tries = 5
+    while not callback.submit() and tries > 0:
+        sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+        sys.stderr.flush()
+        time.sleep(1)
+        tries -= 1
+
+    tries = 5
+    while not finish_test(build_id, test_id, failed) and tries > 0:  # BUG FIX: was 'tries > 5', so the retry loop never ran
+        sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+        sys.stderr.flush()
+        time.sleep(1)
+        tries -= 1
+
+    return returncode
+
+def wrap_global(command):
+    """
+    call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+    buildlogger webapp. see :func:`append_global_logs` for the
+    difference between "global" and "test" log output.
+    """
+
+    # get builder name and build number from environment
+    builder = os.environ.get('MONGO_BUILDER_NAME')
+    buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+    if builder is None or buildnum is None:
+        return run_and_echo(command)
+
+    try:
+        buildnum = int(buildnum)
+    except ValueError:
+        sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+        sys.stderr.write(traceback.format_exc())
+        sys.stderr.flush()
+        return run_and_echo(command)
+
+    build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+    build_info.pop('MONGO_BUILDER_NAME', None)
+    build_info.pop('MONGO_BUILD_NUMBER', None)
+
+    build_id = get_or_create_build(builder, buildnum, extra=build_info)
+    if not build_id:
+        return run_and_echo(command)
+
+    callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+    returncode = loop_and_callback(command, callback)
+
+    # this will append any remaining unsubmitted logs, or
+    # return True if there are none left to submit
+    tries = 5
+    while not callback.submit() and tries > 0:
+        sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+        sys.stderr.flush()
+        time.sleep(1)
+        tries -= 1
+
+    return returncode
+
+def loop_and_callback(command, callback):
+    """
+    run the given command (a sequence of arguments, ordinarily
+    from sys.argv), and call the given callback with each line
+    of stdout or stderr encountered; trailing newlines are
+    stripped from each line before the callback is invoked.
+ """ + proc = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + + def handle_sigterm(signum, frame): + try: + proc.send_signal(signum) + except AttributeError: + os.kill(proc.pid, signum) + + # register a handler to delegate SIGTERM + # to the child process + orig_handler = signal.signal(signal.SIGTERM, handle_sigterm) + + while proc.poll() is None: + try: + line = proc.stdout.readline().strip('\r\n') + line = utils.unicode_dammit(line) + callback(line) + except IOError: + # if the signal handler is called while + # we're waiting for readline() to return, + # don't show a traceback + break + + # There may be additional buffered output + for line in proc.stdout.readlines(): + callback(line.strip('\r\n')) + + # restore the original signal handler, if any + signal.signal(signal.SIGTERM, orig_handler) + return proc.returncode + + +if __name__ == '__main__': + # argv[0] is 'buildlogger.py' + del sys.argv[0] + + if sys.argv[0] in ('-g', '--global'): + # then this is wrapping a "global" command, and should + # submit global logs to the build, not test logs to a + # test within the build + del sys.argv[0] + wrapper = wrap_global + + else: + wrapper = wrap_test + + # if we are missing credentials or the json module, then + # we can't use buildlogger; so just echo output, but also + # log why we can't work. + if json is None: + sys.stderr.write('buildlogger: could not import a json module\n') + sys.stderr.flush() + wrapper = run_and_echo + + elif username is None or password is None: + sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file) + sys.stderr.flush() + wrapper = run_and_echo + + # otherwise wrap a test command as normal; the + # wrapper functions return the return code of + # the wrapped command, so that should be our + # exit code as well. 
+    sys.exit(wrapper(sys.argv))
+
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py b/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+    if "smoke.py" in c:
+        return False
+
+    if "emr.py" in c:
+        return False
+
+    if "java" in c:
+        return False
+
+    # if root directory is provided, see if command line matches mongod process running
+    # with the same data directory
+
+    if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+        return True
+
+    if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+        return True
+
+    if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+        return True
+
+    return False
+
+def killprocs( signal="", root=None ):
+    killed = 0
+
+    if sys.platform == 'win32':
+        return killed
+
+    l = utils.getprocesslist()
+    print( "num procs:" + str( len( l ) ) )
+    if len(l) == 0:
+        print( "no procs" )
+        try:
+            print( utils.execsys( "/sbin/ifconfig -a" ) )  # FIX: bare execsys was a NameError hidden by the except below
+        except Exception,e:
+            print( "can't get interfaces" + str( e ) )
+
+    for x in l:
+        x = x.lstrip()
+        if not shouldKill( x, root=root ):
+            continue
+
+        pid = x.split( " " )[0]
+        print( "killing: " + x )
+        utils.execsys( "/bin/kill " + signal + " " + pid )
+        killed = killed + 1
+
+    return killed
+
+
+def tryToRemove(path):
+    for _ in range(60):
+        try:
+            os.remove(path)
+            return True
+        except OSError, e:
+            win_errno = getattr(e, 'winerror', None)  # renamed from 'errno': shadowed the stdlib module name
+            # check for the access denied and file in use WindowsErrors
+            if win_errno in (5, 32):
+                print("os.remove(%s) failed, retrying in one second."
+                      % path)
+                time.sleep(1)
+            else:
+                raise  # FIX: bare raise preserves the original traceback ('raise e' reset it)
+    return False
+
+
+def cleanup( root , nokill ):
+    if nokill:
+        print "nokill requested, not killing anybody"
+    else:
+        if killprocs( root=root ) > 0:
+            time.sleep(3)
+            killprocs( "-9", root=root )
+
+    # delete all regular files, directories can stay
+    # NOTE: if we delete directories later, we can't delete diskfulltest
+    for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+        for x in filenames:
+            foo = dirpath + "/" + x
+            if os.path.exists(foo):
+                if not tryToRemove(foo):
+                    raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+    parser = OptionParser(usage="read the script")
+    parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+    (options, args) = parser.parse_args()
+
+    root = "/data/db/"
+    if len(args) > 0:
+        root = args[0]
+
+    cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/smoke.py b/src/mongo/gotools/test/legacy26/buildscripts/smoke.py
new file mode 100755
index 00000000000..7c8da1108f9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/smoke.py
@@ -0,0 +1,1314 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+ +# 4 Running a separate mongo shell for each js file is slower than +# loading js files into one mongo shell process. Maybe have runTest +# queue up all filenames ending in ".js" and run them in one mongo +# shell at the "end" of testing? + +# 5 Right now small-oplog implies master/slave replication. Maybe +# running with replication should be an orthogonal concern. (And +# maybe test replica set replication, too.) + +# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills +# off all mongods on a box, which means you can't run two smoke.py +# jobs on the same host at once. So something's gotta change. + +from datetime import datetime +from itertools import izip +import glob +from optparse import OptionParser +import os +import pprint +import re +import shlex +import socket +import stat +from subprocess import (PIPE, Popen, STDOUT) +import sys +import time + +from pymongo import MongoClient +from pymongo.errors import OperationFailure +from pymongo import ReadPreference + +import cleanbb +import utils + +try: + import cPickle as pickle +except ImportError: + import pickle + +try: + from hashlib import md5 # new in 2.5 +except ImportError: + from md5 import md5 # deprecated in 2.5 + +try: + import json +except: + try: + import simplejson as json + except: + json = None + + +# TODO clean this up so we don't need globals... 
+mongo_repo = os.getcwd() #'./' +failfile = os.path.join(mongo_repo, 'failfile.smoke') +test_path = None +mongod_executable = None +mongod_port = None +shell_executable = None +continue_on_failure = None +file_of_commands_mode = False +start_mongod = True +temp_path = None +clean_every_n_tests = 1 +clean_whole_dbroot = False + +tests = [] +winners = [] +losers = {} +fails = [] # like losers but in format of tests + +# For replication hash checking +replicated_collections = [] +lost_in_slave = [] +lost_in_master = [] +screwy_in_slave = {} + +smoke_db_prefix = '' +small_oplog = False +small_oplog_rs = False + +test_report = { "results": [] } +report_file = None + +# This class just implements the with statement API, for a sneaky +# purpose below. +class Nothing(object): + def __enter__(self): + return self + def __exit__(self, type, value, traceback): + return not isinstance(value, Exception) + +def buildlogger(cmd, is_global=False): + # if the environment variable MONGO_USE_BUILDLOGGER + # is set to 'true', then wrap the command with a call + # to buildlogger.py, which sends output to the buidlogger + # machine; otherwise, return as usual. + if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true': + if is_global: + return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd + else: + return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd + return cmd + + +def clean_dbroot(dbroot="", nokill=False): + # Clean entire /data/db dir if --with-cleanbb, else clean specific database path. 
+ if clean_whole_dbroot and not small_oplog: + dbroot = os.path.normpath(smoke_db_prefix + "/data/db") + if os.path.exists(dbroot): + print("clean_dbroot: %s" % dbroot) + cleanbb.cleanup(dbroot, nokill) + + +class mongod(object): + def __init__(self, **kwargs): + self.kwargs = kwargs + self.proc = None + self.auth = False + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + try: + self.stop() + except Exception, e: + print >> sys.stderr, "error shutting down mongod" + print >> sys.stderr, e + return not isinstance(value, Exception) + + def ensure_test_dirs(self): + utils.ensureDir(smoke_db_prefix + "/tmp/unittest/") + utils.ensureDir(smoke_db_prefix + "/data/") + utils.ensureDir(smoke_db_prefix + "/data/db/") + + def check_mongo_port(self, port=27017): + sock = socket.socket() + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + sock.settimeout(1) + sock.connect(("localhost", int(port))) + sock.close() + + def is_mongod_up(self, port=mongod_port): + try: + self.check_mongo_port(int(port)) + return True + except Exception,e: + print >> sys.stderr, e + return False + + def did_mongod_start(self, port=mongod_port, timeout=300): + while timeout > 0: + time.sleep(1) + is_up = self.is_mongod_up(port) + if is_up: + return True + timeout = timeout - 1 + print >> sys.stderr, "timeout starting mongod" + return False + + def start(self): + global mongod_port + global mongod + if self.proc: + print >> sys.stderr, "probable bug: self.proc already set in start()" + return + self.ensure_test_dirs() + dir_name = smoke_db_prefix + "/data/db/sconsTests/" + self.port = int(mongod_port) + self.slave = False + if 'slave' in self.kwargs: + dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/' + srcport = mongod_port + self.port += 1 + self.slave = True + + clean_dbroot(dbroot=dir_name, nokill=self.slave) + utils.ensureDir(dir_name) + + argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name] + # These 
parameters are alwas set for tests + # SERVER-9137 Added httpinterface parameter to keep previous behavior + argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface'] + if self.kwargs.get('small_oplog'): + argv += ["--master", "--oplogSize", "511"] + params = self.kwargs.get('set_parameters', None) + if params: + for p in params.split(','): argv += ['--setParameter', p] + if self.kwargs.get('small_oplog_rs'): + argv += ["--replSet", "foo", "--oplogSize", "511"] + if self.slave: + argv += ['--slave', '--source', 'localhost:' + str(srcport)] + if self.kwargs.get('no_journal'): + argv += ['--nojournal'] + if self.kwargs.get('no_preallocj'): + argv += ['--nopreallocj'] + if self.kwargs.get('auth'): + argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false'] + authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR') + if authMechanism != 'MONGODB-CR': + argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism] + self.auth = True + if self.kwargs.get('keyFile'): + argv += ['--keyFile', self.kwargs.get('keyFile')] + if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'): + argv += ['--sslMode', "requireSSL", + '--sslPEMKeyFile', 'jstests/libs/server.pem', + '--sslCAFile', 'jstests/libs/ca.pem', + '--sslWeakCertificateValidation'] + if self.kwargs.get('use_x509'): + argv += ['--clusterAuthMode','x509']; + self.auth = True + print "running " + " ".join(argv) + self.proc = self._start(buildlogger(argv, is_global=True)) + + if not self.did_mongod_start(self.port): + raise Exception("Failed to start mongod") + + if self.slave: + local = MongoClient(port=self.port, + read_preference=ReadPreference.SECONDARY_PREFERRED).local + synced = False + while not synced: + synced = True + for source in local.sources.find(fields=["syncedTo"]): + synced = synced and "syncedTo" in source and source["syncedTo"] + + def _start(self, argv): + """In most cases, just call subprocess.Popen(). 
On windows, + add the started process to a new Job Object, so that any + child processes of this process can be killed with a single + call to TerminateJobObject (see self.stop()). + """ + + if os.sys.platform == "win32": + # Create a job object with the "kill on job close" + # flag; this is inherited by child processes (ie + # the mongod started on our behalf by buildlogger) + # and lets us terminate the whole tree of processes + # rather than orphaning the mongod. + import win32job + + # Magic number needed to allow job reassignment in Windows 7 + # see: MSDN - Process Creation Flags - ms684863 + CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + + proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB) + + self.job_object = win32job.CreateJobObject(None, '') + + job_info = win32job.QueryInformationJobObject( + self.job_object, win32job.JobObjectExtendedLimitInformation) + job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE + win32job.SetInformationJobObject( + self.job_object, + win32job.JobObjectExtendedLimitInformation, + job_info) + + win32job.AssignProcessToJobObject(self.job_object, proc._handle) + + else: + proc = Popen(argv) + + return proc + + def stop(self): + if not self.proc: + print >> sys.stderr, "probable bug: self.proc unset in stop()" + return + try: + if os.sys.platform == "win32": + import win32job + win32job.TerminateJobObject(self.job_object, -1) + import time + # Windows doesn't seem to kill the process immediately, so give it some time to die + time.sleep(5) + else: + # This function not available in Python 2.5 + self.proc.terminate() + except AttributeError: + from os import kill + kill(self.proc.pid, 15) + self.proc.wait() + sys.stderr.flush() + sys.stdout.flush() + + def wait_for_repl(self): + print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")" + MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000) + print "Replicated write completed -- 
done wait_for_repl" + +class Bug(Exception): + def __str__(self): + return 'bug in smoke.py: ' + super(Bug, self).__str__() + +class TestFailure(Exception): + pass + +class TestExitFailure(TestFailure): + def __init__(self, *args): + self.path = args[0] + self.status=args[1] + + def __str__(self): + return "test %s exited with status %d" % (self.path, self.status) + +class TestServerFailure(TestFailure): + def __init__(self, *args): + self.path = args[0] + self.status = -1 # this is meaningless as an exit code, but + # that's the point. + def __str__(self): + return 'mongod not running after executing test %s' % self.path + +def check_db_hashes(master, slave): + # Need to pause a bit so a slave might catch up... + if not slave.slave: + raise(Bug("slave instance doesn't have slave attribute set")) + + print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port) + master.wait_for_repl() + print "caught up!" + + # FIXME: maybe make this run dbhash on all databases? 
+ for mongod in [master, slave]: + client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED) + mongod.dbhash = client.test.command("dbhash") + mongod.dict = mongod.dbhash["collections"] + + global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections + + replicated_collections += master.dict.keys() + + for coll in replicated_collections: + if coll not in slave.dict and coll not in lost_in_slave: + lost_in_slave.append(coll) + mhash = master.dict[coll] + shash = slave.dict[coll] + if mhash != shash: + mTestDB = MongoClient(port=master.port).test + sTestDB = MongoClient(port=slave.port, + read_preference=ReadPreference.SECONDARY_PREFERRED).test + mCount = mTestDB[coll].count() + sCount = sTestDB[coll].count() + stats = {'hashes': {'master': mhash, 'slave': shash}, + 'counts':{'master': mCount, 'slave': sCount}} + try: + mDocs = list(mTestDB[coll].find().sort("_id", 1)) + sDocs = list(sTestDB[coll].find().sort("_id", 1)) + mDiffDocs = list() + sDiffDocs = list() + for left, right in izip(mDocs, sDocs): + if left != right: + mDiffDocs.append(left) + sDiffDocs.append(right) + + stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs } + except Exception, e: + stats["error-docs"] = e; + + screwy_in_slave[coll] = stats + if mhash == "no _id _index": + mOplog = mTestDB.connection.local["oplog.$main"]; + oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \ + {"op":"c"}]}).sort("$natural", 1)) + print "oplog for %s" % mTestDB[coll].full_name + for doc in oplog_entries: + pprint.pprint(doc, width=200) + + + for db in slave.dict.keys(): + if db not in master.dict and db not in lost_in_master: + lost_in_master.append(db) + + +def ternary( b , l="true", r="false" ): + if b: + return l + return r + +# Blech. 
+def skipTest(path): + basename = os.path.basename(path) + parentPath = os.path.dirname(path) + parentDir = os.path.basename(parentPath) + if small_oplog: # For tests running in parallel + if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js", + "connections_opened.js", "opcounters.js", "dbadmin.js"]: + return True + if use_ssl: + # Skip tests using mongobridge since it does not support SSL + # TODO: Remove when SERVER-10910 has been resolved. + if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js", + "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]: + return True + # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved + if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js", + "unix_socket1.js"]: + return True; + if auth or keyFile or use_x509: # For tests running with auth + # Skip any tests that run with auth explicitly + if parentDir.lower() == "auth" or "auth" in basename.lower(): + return True + if parentPath == mongo_repo: # Skip client tests + return True + if parentDir == "tool": # SERVER-6368 + return True + if parentDir == "dur": # SERVER-7317 + return True + if parentDir == "disk": # SERVER-7356 + return True + + authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589, + ("jstests", "killop.js"), # SERVER-10128 + ("sharding", "sync3.js"), # SERVER-6388 for this and those below + ("sharding", "sync6.js"), + ("sharding", "parallel.js"), + ("jstests", "bench_test1.js"), + ("jstests", "bench_test2.js"), + ("jstests", "bench_test3.js"), + ("core", "bench_test1.js"), + ("core", "bench_test2.js"), + ("core", "bench_test3.js"), + ] + + if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]: + return True + + return False + +forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets"] +# look for jstests and one of the above suites separated by either posix or windows slashes +forceCommandsRE = 
re.compile(r"jstests[/\\](%s)" % ('|'.join(forceCommandsForDirs))) +def setShellWriteModeForTest(path, argv): + swm = shell_write_mode + if swm == "legacy": # change when the default changes to "commands" + if use_write_commands or forceCommandsRE.search(path): + swm = "commands" + argv += ["--writeMode", swm] + +def runTest(test, result): + # result is a map containing test result details, like result["url"] + + # test is a tuple of ( filename , usedb<bool> ) + # filename should be a js file to run + # usedb is true if the test expects a mongod to be running + + (path, usedb) = test + (ignore, ext) = os.path.splitext(path) + test_mongod = mongod() + mongod_is_up = test_mongod.is_mongod_up(mongod_port) + result["mongod_running_at_start"] = mongod_is_up; + + if file_of_commands_mode: + # smoke.py was invoked like "--mode files --from-file foo", + # so don't try to interpret the test path too much + if os.sys.platform == "win32": + argv = [path] + else: + argv = shlex.split(path) + path = argv[0] + # if the command is a python script, use the script name + if os.path.basename(path) in ('python', 'python.exe'): + path = argv[1] + elif ext == ".js": + argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism] + + setShellWriteModeForTest(path, argv) + + if not usedb: + argv += ["--nodb"] + if small_oplog or small_oplog_rs: + argv += ["--eval", 'testingReplication = true;'] + if use_ssl: + argv += ["--ssl", + "--sslPEMKeyFile", "jstests/libs/client.pem", + "--sslCAFile", "jstests/libs/ca.pem", + "--sslAllowInvalidCertificates"] + argv += [path] + elif ext in ["", ".exe"]: + # Blech. 
+ if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]: + argv = [path] + # default data directory for test and perftest is /tmp/unittest + if smoke_db_prefix: + dir_name = smoke_db_prefix + '/unittests' + argv.extend(["--dbpath", dir_name] ) + # more blech + elif os.path.basename(path) in ['mongos', 'mongos.exe']: + argv = [path, "--test"] + else: + argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path, + "--port", mongod_port] + else: + raise Bug("fell off in extension case: %s" % path) + + mongo_test_filename = os.path.basename(path) + + # sys.stdout.write() is more atomic than print, so using it prevents + # lines being interrupted by, e.g., child processes + sys.stdout.write(" *******************************************\n") + sys.stdout.write(" Test : %s ...\n" % mongo_test_filename) + sys.stdout.flush() + + # FIXME: we don't handle the case where the subprocess + # hangs... that's bad. + if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv : + evalString = 'load("jstests/libs/use_extended_timeout.js");' + \ + 'TestData = new Object();' + \ + 'TestData.testPath = "' + path + '";' + \ + 'TestData.testFile = "' + os.path.basename( path ) + '";' + \ + 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \ + 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \ + 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \ + 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \ + 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \ + 'TestData.auth = ' + ternary( auth ) + ";" + \ + 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \ + 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \ + 'TestData.authMechanism = ' + ternary( authMechanism, + '"' + str(authMechanism) + 
'"', 'null') + ";" + \ + 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \ + 'TestData.useX509 = ' + ternary( use_x509 ) + ";" + # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js) + evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";' + evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";' + if os.sys.platform == "win32": + # double quotes in the evalString on windows; this + # prevents the backslashes from being removed when + # the shell (i.e. bash) evaluates this string. yuck. + evalString = evalString.replace('\\', '\\\\') + + if auth and usedb: + evalString += 'jsTest.authenticate(db.getMongo());' + + argv = argv + [ '--eval', evalString] + + if argv[0].endswith( 'test' ) or argv[0].endswith( 'test.exe' ): + if no_preallocj : + argv = argv + [ '--nopreallocj' ] + if temp_path: + argv = argv + [ '--tempPath', temp_path ] + + + sys.stdout.write(" Command : %s\n" % ' '.join(argv)) + sys.stdout.write(" Date : %s\n" % datetime.now().ctime()) + sys.stdout.flush() + + os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename + t1 = time.time() + + proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0) + first_line = proc.stdout.readline() # Get suppressed output URL + m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line) + if m: + result["url"] = m.group("url") + sys.stdout.write(first_line) + sys.stdout.flush() + while True: + # print until subprocess's stdout closed. + # Not using "for line in file" since that has unwanted buffering. + line = proc.stdout.readline() + if not line: + break; + + sys.stdout.write(line) + sys.stdout.flush() + + proc.wait() # wait if stdout is closed before subprocess exits. 
+ r = proc.returncode + + t2 = time.time() + del os.environ['MONGO_TEST_FILENAME'] + + timediff = t2 - t1 + # timediff is seconds by default + scale = 1 + suffix = "seconds" + # if timediff is less than 10 seconds use ms + if timediff < 10: + scale = 1000 + suffix = "ms" + # if timediff is more than 60 seconds use minutes + elif timediff > 60: + scale = 1.0 / 60.0 + suffix = "minutes" + sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix)) + sys.stdout.flush() + + result["exit_code"] = r + + is_mongod_still_up = test_mongod.is_mongod_up(mongod_port) + if not is_mongod_still_up: + print "mongod is not running after test" + result["mongod_running_at_end"] = is_mongod_still_up; + if start_mongod: + raise TestServerFailure(path) + + result["mongod_running_at_end"] = is_mongod_still_up; + + if r != 0: + raise TestExitFailure(path, r) + + print "" + +def run_tests(tests): + # FIXME: some suites of tests start their own mongod, so don't + # need this. (So long as there are no conflicts with port, + # dbpath, etc., and so long as we shut ours down properly, + # starting this mongod shouldn't break anything, though.) 
+ + # The reason we want to use "with" is so that we get __exit__ semantics + # but "with" is only supported on Python 2.5+ + + if start_mongod: + master = mongod(small_oplog_rs=small_oplog_rs, + small_oplog=small_oplog, + no_journal=no_journal, + set_parameters=set_parameters, + no_preallocj=no_preallocj, + auth=auth, + authMechanism=authMechanism, + keyFile=keyFile, + use_ssl=use_ssl, + use_x509=use_x509).__enter__() + else: + master = Nothing() + try: + if small_oplog: + slave = mongod(slave=True, + set_parameters=set_parameters).__enter__() + elif small_oplog_rs: + slave = mongod(slave=True, + small_oplog_rs=small_oplog_rs, + small_oplog=small_oplog, + no_journal=no_journal, + set_parameters=set_parameters, + no_preallocj=no_preallocj, + auth=auth, + authMechanism=authMechanism, + keyFile=keyFile, + use_ssl=use_ssl, + use_x509=use_x509).__enter__() + primary = MongoClient(port=master.port); + + primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [ + {'_id': 0, 'host':'localhost:%s' % master.port}, + {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}}) + + ismaster = False + while not ismaster: + result = primary.admin.command("ismaster"); + ismaster = result["ismaster"] + time.sleep(1) + else: + slave = Nothing() + + try: + if small_oplog or small_oplog_rs: + master.wait_for_repl() + + for tests_run, test in enumerate(tests): + tests_run += 1 # enumerate from 1, python 2.5 compatible + test_result = { "start": time.time() } + + (test_path, use_db) = test + + if test_path.startswith(mongo_repo + os.path.sep): + test_result["test_file"] = test_path[len(mongo_repo)+1:] + else: + # user could specify a file not in repo. leave it alone. 
+ test_result["test_file"] = test_path + + try: + if skipTest(test_path): + test_result["status"] = "skip" + + print "skipping " + test_path + else: + fails.append(test) + runTest(test, test_result) + fails.pop() + winners.append(test) + + test_result["status"] = "pass" + + test_result["end"] = time.time() + test_result["elapsed"] = test_result["end"] - test_result["start"] + test_report["results"].append( test_result ) + if small_oplog or small_oplog_rs: + master.wait_for_repl() + # check the db_hashes + if isinstance(slave, mongod): + check_db_hashes(master, slave) + check_and_report_replication_dbhashes() + + elif use_db: # reach inside test and see if "usedb" is true + if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0: + # Restart mongod periodically to clean accumulated test data + # clean_dbroot() is invoked by mongod.start() + master.__exit__(None, None, None) + master = mongod(small_oplog_rs=small_oplog_rs, + small_oplog=small_oplog, + no_journal=no_journal, + set_parameters=set_parameters, + no_preallocj=no_preallocj, + auth=auth, + authMechanism=authMechanism, + keyFile=keyFile, + use_ssl=use_ssl, + use_x509=use_x509).__enter__() + + except TestFailure, f: + test_result["end"] = time.time() + test_result["elapsed"] = test_result["end"] - test_result["start"] + test_result["error"] = str(f) + test_result["status"] = "fail" + test_report["results"].append( test_result ) + try: + print f + # Record the failing test and re-raise. 
+ losers[f.path] = f.status + raise f + except TestServerFailure, f: + return 2 + except TestFailure, f: + if not continue_on_failure: + return 1 + if isinstance(slave, mongod): + check_db_hashes(master, slave) + finally: + slave.__exit__(None, None, None) + finally: + master.__exit__(None, None, None) + return 0 + + +def check_and_report_replication_dbhashes(): + def missing(lst, src, dst): + if lst: + print """The following collections were present in the %s but not the %s +at the end of testing:""" % (src, dst) + for db in lst: + print db + + missing(lost_in_slave, "master", "slave") + missing(lost_in_master, "slave", "master") + if screwy_in_slave: + print """The following collections has different hashes in master and slave +at the end of testing:""" + for coll in screwy_in_slave.keys(): + stats = screwy_in_slave[coll] + # Counts are "approx" because they are collected after the dbhash runs and may not + # reflect the states of the collections that were hashed. If the hashes differ, one + # possibility is that a test exited with writes still in-flight. + print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave']) + if "docs" in stats: + if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or + ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)): + print "All docs matched!" 
+ else: + print "Different Docs" + print "Master docs:" + pprint.pprint(stats["docs"]["master"], indent=2) + print "Slave docs:" + pprint.pprint(stats["docs"]["slave"], indent=2) + if "error-docs" in stats: + print "Error getting docs to diff:" + pprint.pprint(stats["error-docs"]) + return True + + if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave): + print "replication ok for %d collections" % (len(replicated_collections)) + + return False + + +def report(): + print "%d tests succeeded" % len(winners) + num_missed = len(tests) - (len(winners) + len(losers.keys())) + if num_missed: + print "%d tests didn't get run" % num_missed + if losers: + print "The following tests failed (with exit code):" + for loser in losers: + print "%s\t%d" % (loser, losers[loser]) + + test_result = { "start": time.time() } + if check_and_report_replication_dbhashes(): + test_result["end"] = time.time() + test_result["elapsed"] = test_result["end"] - test_result["start"] + test_result["test_file"] = "/#dbhash#" + test_result["error"] = "dbhash mismatch" + test_result["status"] = "fail" + test_report["results"].append( test_result ) + + if report_file: + f = open( report_file, "wb" ) + f.write( json.dumps( test_report ) ) + f.close() + + if losers or lost_in_slave or lost_in_master or screwy_in_slave: + raise Exception("Test failures") + +# Keys are the suite names (passed on the command line to smoke.py) +# Values are pairs: (filenames, <start mongod before running tests>) +suiteGlobalConfig = {"js": ("core/*.js", True), + "quota": ("quota/*.js", True), + "jsPerf": ("perf/*.js", True), + "disk": ("disk/*.js", True), + "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True), + "noPassthrough": ("noPassthrough/*.js", False), + "parallel": ("parallel/*.js", True), + "clone": ("clone/*.js", False), + "repl": ("repl/*.js", False), + "replSets": ("replsets/*.js", False), + "dur": ("dur/*.js", False), + "auth": ("auth/*.js", False), + 
"sharding": ("sharding/*.js", False), + "tool": ("tool/*.js", False), + "aggregation": ("aggregation/*.js", True), + "multiVersion": ("multiVersion/*.js", True), + "failPoint": ("fail_point/*.js", False), + "ssl": ("ssl/*.js", True), + "sslSpecial": ("sslSpecial/*.js", True), + "jsCore": ("core/*.js", True), + "gle": ("gle/*.js", True), + "slow1": ("slow1/*.js", True), + "slow2": ("slow2/*.js", True), + } + +def get_module_suites(): + """Attempts to discover and return information about module test suites + + Returns a dictionary of module suites in the format: + + { + "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>", + ... + } + + This means the values of this dictionary can be used as "glob"s to match all jstests in the + suite directory that don't start with an underscore + + The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js' + + NOTE: This assumes that if we have more than one module the suite names don't conflict + """ + modules_directory = 'src/mongo/db/modules' + test_suites = {} + + # Return no suites if we have no modules + if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory): + return {} + + module_directories = os.listdir(modules_directory) + for module_directory in module_directories: + + test_directory = os.path.join(modules_directory, module_directory, "jstests") + + # Skip this module if it has no "jstests" directory + if not os.path.exists(test_directory) or not os.path.isdir(test_directory): + continue + + # Get all suites for this module + for test_suite in os.listdir(test_directory): + test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js") + + return test_suites + +def expand_suites(suites,expandUseDB=True): + """Takes a list of suites and expands to a list of tests according to a set of rules. 
+ + Keyword arguments: + suites -- list of suites specified by the user + expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database + (default True) + + This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and + "all"), detection of suites in the "modules" directory, and enumerating the test files in a + given suite. It returns a list of tests of the form (path_to_test, usedb), where the second + part of the tuple specifies whether the test is run against the database (see --nodb in the + mongo shell) + + """ + globstr = None + tests = [] + module_suites = get_module_suites() + for suite in suites: + if suite == 'all': + return expand_suites(['test', + 'perf', + 'jsCore', + 'jsPerf', + 'noPassthroughWithMongod', + 'noPassthrough', + 'clone', + 'parallel', + 'repl', + 'auth', + 'sharding', + 'slow1', + 'slow2', + 'tool'], + expandUseDB=expandUseDB) + if suite == 'test': + if os.sys.platform == "win32": + program = 'test.exe' + else: + program = 'test' + (globstr, usedb) = (program, False) + elif suite == 'perf': + if os.sys.platform == "win32": + program = 'perftest.exe' + else: + program = 'perftest' + (globstr, usedb) = (program, False) + elif suite == 'mongosTest': + if os.sys.platform == "win32": + program = 'mongos.exe' + else: + program = 'mongos' + tests += [(os.path.join(mongo_repo, program), False)] + elif os.path.exists( suite ): + usedb = True + for name in suiteGlobalConfig: + if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ): + usedb = suiteGlobalConfig[name][1] + break + tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ] + elif suite in module_suites: + # Currently we connect to a database in all module tests since there's no mechanism yet + # to configure it independently + usedb = True + paths = glob.glob(module_suites[suite]) + paths.sort() + tests += [(path, usedb) for path in paths] + else: + try: + globstr, usedb = suiteGlobalConfig[suite] + except KeyError: 
+ raise Exception('unknown test suite %s' % suite) + + if globstr: + if usedb and not expandUseDB: + tests += [ (suite,False) ] + else: + if globstr.endswith('.js'): + loc = 'jstests/' + else: + loc = '' + globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr))) + globstr = os.path.normpath(globstr) + paths = glob.glob(globstr) + paths.sort() + tests += [(path, usedb) for path in paths] + + return tests + +def add_exe(e): + if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ): + e += ".exe" + return e + +def set_globals(options, tests): + global mongod_executable, mongod_port, shell_executable, continue_on_failure + global small_oplog, small_oplog_rs + global no_journal, set_parameters, set_parameters_mongos, no_preallocj + global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod + global use_ssl, use_x509 + global file_of_commands_mode + global report_file, shell_write_mode, use_write_commands + global temp_path + global clean_every_n_tests + global clean_whole_dbroot + + start_mongod = options.start_mongod + if hasattr(options, 'use_ssl'): + use_ssl = options.use_ssl + if hasattr(options, 'use_x509'): + use_x509 = options.use_x509 + use_ssl = use_ssl or use_x509 + #Careful, this can be called multiple times + test_path = options.test_path + + mongod_executable = add_exe(options.mongod_executable) + if not os.path.exists(mongod_executable): + raise Exception("no mongod found in this directory.") + + mongod_port = options.mongod_port + + shell_executable = add_exe( options.shell_executable ) + if not os.path.exists(shell_executable): + raise Exception("no mongo shell found in this directory.") + + continue_on_failure = options.continue_on_failure + smoke_db_prefix = options.smoke_db_prefix + small_oplog = options.small_oplog + if hasattr(options, "small_oplog_rs"): + small_oplog_rs = options.small_oplog_rs + no_journal = options.no_journal + set_parameters = options.set_parameters + set_parameters_mongos = 
options.set_parameters_mongos + no_preallocj = options.no_preallocj + auth = options.auth + authMechanism = options.authMechanism + keyFile = options.keyFile + + clean_every_n_tests = options.clean_every_n_tests + clean_whole_dbroot = options.with_cleanbb + + if auth and not keyFile: + # if only --auth was given to smoke.py, load the + # default keyFile from jstests/libs/authTestsKey + keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey') + + if keyFile: + f = open(keyFile, 'r') + keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace + f.close() + os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR) + else: + keyFileData = None + + # if smoke.py is running a list of commands read from a + # file (or stdin) rather than running a suite of js tests + file_of_commands_mode = options.File and options.mode == 'files' + # generate json report + report_file = options.report_file + temp_path = options.temp_path + + use_write_commands = options.use_write_commands + shell_write_mode = options.shell_write_mode + +def file_version(): + return md5(open(__file__, 'r').read()).hexdigest() + +def clear_failfile(): + if os.path.exists(failfile): + os.remove(failfile) + +def run_old_fails(): + global tests + + try: + f = open(failfile, 'r') + state = pickle.load(f) + f.close() + except Exception: + try: + f.close() + except: + pass + clear_failfile() + return # This counts as passing so we will run all tests + + if ('version' not in state or state['version'] != file_version()): + print "warning: old version of failfile.smoke detected. 
skipping recent fails" + clear_failfile() + return + + testsAndOptions = state['testsAndOptions'] + tests = [x[0] for x in testsAndOptions] + passed = [] + try: + for (i, (test, options)) in enumerate(testsAndOptions): + # SERVER-5102: until we can figure out a better way to manage + # dependencies of the --only-old-fails build phase, just skip + # tests which we can't safely run at this point + path, usedb = test + + if not os.path.exists(path): + passed.append(i) + winners.append(test) + continue + + filename = os.path.basename(path) + if filename in ('test', 'test.exe') or filename.endswith('.js'): + set_globals(options, [filename]) + oldWinners = len(winners) + run_tests([test]) + if len(winners) != oldWinners: # can't use return value due to continue_on_failure + passed.append(i) + finally: + for offset, i in enumerate(passed): + testsAndOptions.pop(i - offset) + + if testsAndOptions: + f = open(failfile, 'w') + state = {'version':file_version(), 'testsAndOptions':testsAndOptions} + pickle.dump(state, f) + else: + clear_failfile() + + report() # exits with failure code if there is an error + +def add_to_failfile(tests, options): + try: + f = open(failfile, 'r') + testsAndOptions = pickle.load(f)["testsAndOptions"] + except Exception: + testsAndOptions = [] + + for test in tests: + if (test, options) not in testsAndOptions: + testsAndOptions.append( (test, options) ) + + state = {'version':file_version(), 'testsAndOptions':testsAndOptions} + f = open(failfile, 'w') + pickle.dump(state, f) + + + +def main(): + global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog + global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth + global keyFile, smoke_db_prefix, test_path, use_write_commands + + parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*") + parser.add_option('--mode', dest='mode', default='suite', + help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)') + # Some of 
our tests hard-code pathnames e.g., to execute, so until + # that changes we don't have the freedom to run from anyplace. + # parser.add_option('--mongo-repo', dest='mongo_repo', default=None, + parser.add_option('--test-path', dest='test_path', default=None, + help="Path to the test executables to run, " + "currently only used for 'client' (%default)") + parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'), + help='Path to mongod to run (%default)') + parser.add_option('--port', dest='mongod_port', default="27999", + help='Port the mongod will bind to (%default)') + parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'), + help='Path to mongo, for .js test files (%default)') + parser.add_option('--continue-on-failure', dest='continue_on_failure', + action="store_true", default=False, + help='If supplied, continue testing even after a test fails') + parser.add_option('--from-file', dest='File', + help="Run tests/suites named in FILE, one test per line, '-' means stdin") + parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix, + help="Prefix to use for the mongods' dbpaths ('%default')") + parser.add_option('--small-oplog', dest='small_oplog', default=False, + action="store_true", + help='Run tests with master/slave replication & use a small oplog') + parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False, + action="store_true", + help='Run tests with replica set replication & use a small oplog') + parser.add_option('--nojournal', dest='no_journal', default=False, + action="store_true", + help='Do not turn on journaling in tests') + parser.add_option('--nopreallocj', dest='no_preallocj', default=False, + action="store_true", + help='Do not preallocate journal files in tests') + parser.add_option('--auth', dest='auth', default=False, + action="store_true", + help='Run standalone mongods in tests with authentication enabled') + 
parser.add_option('--use-x509', dest='use_x509', default=False, + action="store_true", + help='Use x509 auth for internal cluster authentication') + parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR', + help='Use the given authentication mechanism, when --auth is used.') + parser.add_option('--keyFile', dest='keyFile', default=None, + help='Path to keyFile to use to run replSet and sharding tests with authentication enabled') + parser.add_option('--ignore', dest='ignore_files', default=None, + help='Pattern of files to ignore in tests') + parser.add_option('--only-old-fails', dest='only_old_fails', default=False, + action="store_true", + help='Check the failfile and only run all tests that failed last time') + parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False, + action="store_true", + help='Clear the failfile. Do this if all tests pass') + parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true", + default=False, + help='Clear database files before first test') + parser.add_option('--clean-every', dest='clean_every_n_tests', type='int', + default=20, + help='Clear database files every N tests [default %default]') + parser.add_option('--dont-start-mongod', dest='start_mongod', default=True, + action='store_false', + help='Do not start mongod before commencing test running') + parser.add_option('--use-ssl', dest='use_ssl', default=False, + action='store_true', + help='Run mongo shell and mongod instances with SSL encryption') + parser.add_option('--set-parameters', dest='set_parameters', default="", + help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ') + parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="", + help='Adds --setParameter to mongos for each passed in item in the csv list - ex. 
"param1=1,param2=foo" ') + parser.add_option('--temp-path', dest='temp_path', default=None, + help='If present, passed as --tempPath to unittests and dbtests') + # Buildlogger invocation from command line + parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None, + action="store", help='Set the "builder name" for buildlogger') + parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None, + action="store", help='Set the "build number" for buildlogger') + parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None, + action="store", help='Set the url root for the buildlogger service') + parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None, + action="store", help='Path to Python file containing buildlogger credentials') + parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None, + action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)') + parser.add_option('--report-file', dest='report_file', default=None, + action='store', + help='Path to generate detailed json report containing all test details') + parser.add_option('--use-write-commands', dest='use_write_commands', default=False, + action='store_true', + help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default') + parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy", + help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)') + + global tests + (options, tests) = parser.parse_args() + + set_globals(options, tests) + + buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials) + if all(buildlogger_opts): + os.environ['MONGO_USE_BUILDLOGGER'] = 'true' + os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder + os.environ['MONGO_BUILD_NUMBER'] = 
options.buildlogger_buildnum + os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials + if options.buildlogger_phase: + os.environ['MONGO_PHASE'] = options.buildlogger_phase + elif any(buildlogger_opts): + # some but not all of the required options were sete + raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials") + + if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py + os.environ['BUILDLOGGER_URL'] = options.buildlogger_url + + if options.File: + if options.File == '-': + tests = sys.stdin.readlines() + else: + f = open(options.File) + tests = f.readlines() + tests = [t.rstrip('\n') for t in tests] + + if options.only_old_fails: + run_old_fails() + return + elif options.reset_old_fails: + clear_failfile() + return + + # If we're in suite mode, tests is a list of names of sets of tests. + if options.mode == 'suite': + tests = expand_suites(tests) + elif options.mode == 'files': + tests = [(os.path.abspath(test), start_mongod) for test in tests] + + if options.ignore_files != None : + ignore_patt = re.compile( options.ignore_files ) + print "Ignoring files with pattern: ", ignore_patt + + def ignore_test( test ): + if ignore_patt.search( test[0] ) != None: + print "Ignoring test ", test[0] + return False + else: + return True + + tests = filter( ignore_test, tests ) + + if not tests: + print "warning: no tests specified" + return + + if options.with_cleanbb: + clean_dbroot(nokill=True) + + test_report["start"] = time.time() + test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port) + try: + run_tests(tests) + finally: + add_to_failfile(fails, options) + + test_report["end"] = time.time() + test_report["elapsed"] = test_report["end"] - test_report["start"] + test_report["failures"] = len(losers.keys()) + test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port) + if report_file: + f = open( report_file, "wb" ) + f.write( 
json.dumps( test_report, indent=4, separators=(',', ': ')) ) + f.close() + + report() + +if __name__ == "__main__": + main() diff --git a/src/mongo/gotools/test/legacy26/buildscripts/utils.py b/src/mongo/gotools/test/legacy26/buildscripts/utils.py new file mode 100644 index 00000000000..68273ee69c8 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/buildscripts/utils.py @@ -0,0 +1,230 @@ + +import codecs +import re +import socket +import time +import os +import os.path +import itertools +import subprocess +import sys +import hashlib + +# various utilities that are handy + +def getAllSourceFiles( arr=None , prefix="." ): + if arr is None: + arr = [] + + if not os.path.isdir( prefix ): + # assume a file + arr.append( prefix ) + return arr + + for x in os.listdir( prefix ): + if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ): + continue + full = prefix + "/" + x + if os.path.isdir( full ) and not os.path.islink( full ): + getAllSourceFiles( arr , full ) + else: + if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ): + full = full.replace( "//" , "/" ) + arr.append( full ) + + return arr + + +def getGitBranch(): + if not os.path.exists( ".git" ) or not os.path.isdir(".git"): + return None + + version = open( ".git/HEAD" ,'r' ).read().strip() + if not version.startswith( "ref: " ): + return version + version = version.split( "/" ) + version = version[len(version)-1] + return version + +def getGitBranchString( prefix="" , postfix="" ): + t = re.compile( '[/\\\]' ).split( os.getcwd() ) + if len(t) > 2 and t[len(t)-1] == "mongo": + par = t[len(t)-2] + m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par ) + if m is not None: + return prefix + m.group(1).lower() + postfix + if par.find("Nightly") > 0: + return "" + + + b = getGitBranch() + if b == None or b == "master": + return "" + return prefix + b + postfix + +def 
getGitVersion(): + if not os.path.exists( ".git" ) or not os.path.isdir(".git"): + return "nogitversion" + + version = open( ".git/HEAD" ,'r' ).read().strip() + if not version.startswith( "ref: " ): + return version + version = version[5:] + f = ".git/" + version + if not os.path.exists( f ): + return version + return open( f , 'r' ).read().strip() + +def execsys( args ): + import subprocess + if isinstance( args , str ): + r = re.compile( "\s+" ) + args = r.split( args ) + p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE ) + r = p.communicate() + return r; + +def getprocesslist(): + raw = "" + try: + raw = execsys( "/bin/ps axww" )[0] + except Exception,e: + print( "can't get processlist: " + str( e ) ) + + r = re.compile( "[\r\n]+" ) + return r.split( raw ) + +def removeIfInList( lst , thing ): + if thing in lst: + lst.remove( thing ) + +def findVersion( root , choices ): + for c in choices: + if ( os.path.exists( root + c ) ): + return root + c + raise "can't find a version of [" + root + "] choices: " + choices + +def choosePathExist( choices , default=None): + for c in choices: + if c != None and os.path.exists( c ): + return c + return default + +def filterExists(paths): + return filter(os.path.exists, paths) + +def ensureDir( name ): + d = os.path.dirname( name ) + if not os.path.exists( d ): + print( "Creating dir: " + name ); + os.makedirs( d ) + if not os.path.exists( d ): + raise "Failed to create dir: " + name + + +def distinctAsString( arr ): + s = set() + for x in arr: + s.add( str(x) ) + return list(s) + +def checkMongoPort( port=27017 ): + sock = socket.socket() + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + sock.settimeout(1) + sock.connect(("localhost", port)) + sock.close() + + +def didMongodStart( port=27017 , timeout=20 ): + while timeout > 0: + time.sleep( 1 ) + try: + checkMongoPort( port ) + return True + except Exception,e: + print( e ) + timeout = timeout - 1 + return False + +def 
which(executable): + if sys.platform == 'win32': + paths = os.environ.get('Path', '').split(';') + else: + paths = os.environ.get('PATH', '').split(':') + + for path in paths: + path = os.path.expandvars(path) + path = os.path.expanduser(path) + path = os.path.abspath(path) + executable_path = os.path.join(path, executable) + if os.path.exists(executable_path): + return executable_path + + return executable + +def md5sum( file ): + #TODO error handling, etc.. + return execsys( "md5sum " + file )[0].partition(" ")[0] + +def md5string( a_string ): + return hashlib.md5(a_string).hexdigest() + +def find_python(min_version=(2, 5)): + try: + if sys.version_info >= min_version: + return sys.executable + except AttributeError: + # In case the version of Python is somehow missing sys.version_info or sys.executable. + pass + + version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE) + binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python') + for binary in binaries: + try: + out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() + for stream in (out, err): + match = version.search(stream) + if match: + versiontuple = tuple(map(int, match.group(1).split('.'))) + if versiontuple >= min_version: + return which(binary) + except: + pass + + raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version)) + +def smoke_command(*args): + # return a list of arguments that comprises a complete + # invocation of smoke.py + here = os.path.dirname(__file__) + smoke_py = os.path.abspath(os.path.join(here, 'smoke.py')) + # the --with-cleanbb argument causes smoke.py to run + # buildscripts/cleanbb.py before each test phase; this + # prevents us from running out of disk space on slaves + return [find_python(), smoke_py, '--with-cleanbb'] + list(args) + +def run_smoke_command(*args): + # to run a command line script from a scons Alias (or any + # Action), the 
command sequence must be enclosed in a list, + # otherwise SCons treats it as a list of dependencies. + return [smoke_command(*args)] + +# unicode is a pain. some strings cannot be unicode()'d +# but we want to just preserve the bytes in a human-readable +# fashion. this codec error handler will substitute the +# repr() of the offending bytes into the decoded string +# at the position they occurred +def replace_with_repr(unicode_error): + offender = unicode_error.object[unicode_error.start:unicode_error.end] + return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end) + +codecs.register_error('repr', replace_with_repr) + +def unicode_dammit(string, encoding='utf8'): + # convert a string to a unicode, using the Python + # representation of non-ascii bytes when necessary + # + # name inpsired by BeautifulSoup's "UnicodeDammit" + return string.decode(encoding, 'repr') + diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey b/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey new file mode 100644 index 00000000000..573898a4f05 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey @@ -0,0 +1 @@ +This key is only for running the suite with authentication dont use it in any tests directly diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem b/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem new file mode 100644 index 00000000000..f739ef0627b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC +VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w +DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0 +IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz +MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI +DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH 
+ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx +GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27 +nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz +hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN +BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM +hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB +2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E +qQ== +-----END CERTIFICATE----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/client.pem b/src/mongo/gotools/test/legacy26/jstests/libs/client.pem new file mode 100644 index 00000000000..85ace4fd40b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/client.pem @@ -0,0 +1,101 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 7 (0x7) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus + Validity + Not Before: Aug 23 14:55:32 2013 GMT + Not After : Jan 7 14:55:32 2041 GMT + Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a: + 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1: + 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51: + 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f: + 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79: + 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c: + 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae: + 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd: + c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53: + a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31: + 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35: + 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98: + be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18: + b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe: 
+ a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15: + 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2: + 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4: + 6e:a7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C + X509v3 Authority Key Identifier: + keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16 + + Signature Algorithm: sha1WithRSAEncryption + 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d: + f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5: + f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db: + af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87: + 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35: + 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa: + 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85: + 24:18 +-----BEGIN CERTIFICATE----- +MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0 +NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET +MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b +qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM +zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V +rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad +STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B +MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ +BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 +aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw 
+FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54 +xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb +r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh +9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee +p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y +LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j +mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW +WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9 +jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+ +flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4 +H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m +2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4 +tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU +w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S +eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/ +vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC +yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn +LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s +9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo +czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS +q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop +59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4 +9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9 +SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn +X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU +0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52 +re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT +F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3 +57rGT6p0OuM8qbrTzpv3JMrm +-----END PRIVATE KEY----- diff 
--git a/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem b/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem new file mode 100644 index 00000000000..276e62644b6 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIC7jCCAlegAwIBAgIBDDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1 +MjUzMVoXDTQxMDQyMjE1MjUzMVowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP +MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZjbGllbnQwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBALX6DqSWRJBEJJRIRqG5X3cFHzse5jGIdV8fTqikaVitvuhs +15z1njzfqBQZMJBCEvNb4eaenXJRMBDkEOcbfy6ah+ZLLqGFy7b6OxTROfx++3fT +gsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN/ufbH2sX451nVd+j6oAz0dTz7RvhAgMB +AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh +dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjciYidtPfd5ILsm7c2yYGV99vwjAf +BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB +gQCgs74YrlZ6nivONRO8tNWi+gJ1TcWbQV+5yfF7Ispxo1TFxpa6GTWeZA3X4CwK +PHmCdhb+oZoi59Qny0KECxtBj6zwdYIKLN0gIFYygaGX5J+YrRVatTjCJUHz9fco +hZwApLEUkYg2Ldvbg+FncDwiVhi74OW685SkThNIulmPcQ== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALX6DqSWRJBEJJRI +RqG5X3cFHzse5jGIdV8fTqikaVitvuhs15z1njzfqBQZMJBCEvNb4eaenXJRMBDk +EOcbfy6ah+ZLLqGFy7b6OxTROfx++3fTgsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN +/ufbH2sX451nVd+j6oAz0dTz7RvhAgMBAAECgYEAmHRy+g5uSJLeNmBK1EiSIwtm +e8hKP+s7scJvyrdbDpEZJG2zQWtA82zIynXECsdgSwOKQQRXkaNU6oG3a3bM19uY +0CqFRb9EwOLIStp+CM5zLRGmUr73u/+JrBPUWWFJkJvINvTXt18CMnCmosTvygWB +IBZqsuEXQ6JcejxzQ6UCQQDdVUNdE2JgHp1qrr5l8563dztcrfCxuVFtgsj6qnhd +UrBAa388B9kn4yVAe2i55xFmtHsO9Bz3ViiDFO163SafAkEA0nq8PeZtcIlZ2c7+ 
+6/Vdw1uLE5APVG2H9VEZdaVvkwIIXo8WQfMwWo5MQyPjVyBhUGlDwnKa46AcuplJ +2XMtfwJBAIDrMfKb4Ng13OEP6Yz+yvr4MxZ3plQOqlRMMn53HubUzB6pvpGbzKwE +DWWyvDxUT/lvtKHwJJMYlz5KyUygVecCQHr50RBNmLW+2muDILiWlOD2lIyqh/pp +QJ2Zc8mkDkuTTXaKHZQM1byjFXXI+yRFu/Xyeu+abFsAiqiPtXFCdVsCQHai+Ykv +H3y0mUJmwBVP2fBE3GiTGlaadM0auZKu7/ad+yo7Hv8Kibacwibzrj9PjT3mFSSF +vujX1oWOaxAMVbE= +-----END PRIVATE KEY----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem b/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem new file mode 100644 index 00000000000..74dc9845e3d --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem @@ -0,0 +1,101 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 5 (0x5) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus + Validity + Not Before: Aug 7 17:19:17 2013 GMT + Not After : Dec 22 17:19:17 2040 GMT + Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=clustertest + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:98:ec:01:6e:f4:ae:8e:16:c8:87:a2:44:86:a0: + 45:5c:ca:82:56:ba:0d:a9:60:bf:07:40:da:db:70: + 33:a6:c2:ec:9d:e1:f0:da:fe:b9:f9:ac:23:33:64: + e6:63:71:cc:a2:0d:eb:86:bc:31:32:aa:30:e6:1d: + 5d:6d:fd:45:f4:2f:dc:72:93:bc:92:27:f7:6a:5a: + 18:04:f7:64:d0:6a:3c:a9:14:f6:9e:9d:58:26:f4: + 16:93:7e:3d:2e:3c:9e:54:41:4d:1a:e1:bd:b4:cf: + d0:05:4c:4d:15:fb:5c:70:1e:0c:32:6d:d7:67:5b: + ec:b2:61:83:e3:f0:b1:78:aa:30:45:86:f9:6d:f5: + 48:1f:f1:90:06:25:db:71:ed:af:d7:0d:65:65:70: + 89:d4:c8:c8:23:a0:67:22:de:d9:6e:1d:44:38:cf: + 0f:eb:2c:fe:79:01:d7:98:15:5f:22:42:3f:ee:c9: + 16:eb:b9:25:08:9a:2a:11:74:47:e0:51:75:8c:ae: + eb:8d:b5:30:fe:48:98:0a:9e:ba:6e:a4:60:08:81: + c6:05:a0:97:38:70:c0:1f:b4:27:96:8e:c3:d2:c1: + 14:5f:34:16:91:7d:ad:4c:e9:23:07:f0:42:86:78: + 11:a1:1e:9d:f3:d0:41:09:06:7d:5c:89:ef:d2:0d: + 6c:d5 + Exponent: 
65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + C9:00:3A:28:CC:6A:75:57:82:81:00:A6:25:48:6C:CE:0A:A0:4A:59 + X509v3 Authority Key Identifier: + keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16 + + Signature Algorithm: sha1WithRSAEncryption + d1:55:e3:5c:43:8c:4f:d3:29:8d:74:4a:1d:23:50:17:27:b3: + 30:6f:c6:d7:4c:6c:96:7e:52:a0:2f:91:92:b3:f5:4c:a1:ca: + 88:62:31:e4:d6:64:ac:40:17:47:00:24:e8:0d:3b:7b:c7:d4: + 7f:3a:76:45:27:fd:9b:ae:9d:44:71:8f:ab:62:60:e5:9d:e8: + 59:dd:0e:25:17:14:f8:83:b0:b6:fc:5f:27:8b:69:a2:dc:31: + b9:17:a1:27:92:96:c1:73:bf:a3:f0:b8:97:b9:e2:fb:97:6d: + 44:01:b0:68:68:47:4b:84:56:3b:19:66:f8:0b:6c:1b:f5:44: + a9:ae +-----BEGIN CERTIFICATE----- +MIIDdzCCAuCgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgwNzE3 +MTkxN1oXDTQwMTIyMjE3MTkxN1owbzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP +MA0GA1UECwwGS2VybmVsMRQwEgYDVQQDDAtjbHVzdGVydGVzdDCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJjsAW70ro4WyIeiRIagRVzKgla6DalgvwdA +2ttwM6bC7J3h8Nr+ufmsIzNk5mNxzKIN64a8MTKqMOYdXW39RfQv3HKTvJIn92pa +GAT3ZNBqPKkU9p6dWCb0FpN+PS48nlRBTRrhvbTP0AVMTRX7XHAeDDJt12db7LJh +g+PwsXiqMEWG+W31SB/xkAYl23Htr9cNZWVwidTIyCOgZyLe2W4dRDjPD+ss/nkB +15gVXyJCP+7JFuu5JQiaKhF0R+BRdYyu6421MP5ImAqeum6kYAiBxgWglzhwwB+0 +J5aOw9LBFF80FpF9rUzpIwfwQoZ4EaEenfPQQQkGfVyJ79INbNUCAwEAAaN7MHkw +CQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2Vy +dGlmaWNhdGUwHQYDVR0OBBYEFMkAOijManVXgoEApiVIbM4KoEpZMB8GA1UdIwQY +MBaAFAdBGTqffsW3Ik63vNXf5PwJuGQWMA0GCSqGSIb3DQEBBQUAA4GBANFV41xD +jE/TKY10Sh0jUBcnszBvxtdMbJZ+UqAvkZKz9UyhyohiMeTWZKxAF0cAJOgNO3vH +1H86dkUn/ZuunURxj6tiYOWd6FndDiUXFPiDsLb8XyeLaaLcMbkXoSeSlsFzv6Pw 
+uJe54vuXbUQBsGhoR0uEVjsZZvgLbBv1RKmu +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCY7AFu9K6OFsiH +okSGoEVcyoJWug2pYL8HQNrbcDOmwuyd4fDa/rn5rCMzZOZjccyiDeuGvDEyqjDm +HV1t/UX0L9xyk7ySJ/dqWhgE92TQajypFPaenVgm9BaTfj0uPJ5UQU0a4b20z9AF +TE0V+1xwHgwybddnW+yyYYPj8LF4qjBFhvlt9Ugf8ZAGJdtx7a/XDWVlcInUyMgj +oGci3tluHUQ4zw/rLP55AdeYFV8iQj/uyRbruSUImioRdEfgUXWMruuNtTD+SJgK +nrpupGAIgcYFoJc4cMAftCeWjsPSwRRfNBaRfa1M6SMH8EKGeBGhHp3z0EEJBn1c +ie/SDWzVAgMBAAECggEAfogRK5Dz+gfqByiCEO7+VagOrtolwbeWeNb2AEpXwq1Z +Ac5Y76uDkI4ZVkYvx6r6ykBAWOzQvH5MFavIieDeiA0uF/QcPMcrFmnTpBBb74No +C/OXmGjS7vBa2dHDp8VqsIaT2SFeSgUFt8yJoB2rP+3s47E1YYWTVYoQioO3JQJN +f0mSuvTnvJO9lbTWiW+yWGVkQvIciCCnHkCEwU0fHht8IoFBGNFlpWZcGiMeietr +16GdRcmAq95q8TTCeQxkgmmL+0ZJ1BrF7llG2pGYdacawXj1eVRqOHQaFIlcKe05 +RITpuXVYOWBpBpfbQsBZaCGLe7WxHJedrFxdbqm0ZQKBgQDLUQrmIl2wz43t3sI+ +WjW6y1GwMPG9EjXUT1Boq6PNHKgw04/32QNn5IMmz4cp2Mgyz7Hc0ABDU/ZATujd +yCkxVErPbKRDKSxSl6nLXtLpLbHFmVPfKPbNKIuyFMBsOFOtoFoVbo33wI5dI7aO +i7sTGB3ngbq4pzCJ9dVt/t81QwKBgQDAjAtBXS8WB69l9w35tx+MgYG0LJ+ykAug +d91pwiWqSt02fZ0nr/S/76G6B4C8eqeOnYh1RzF5isLD246rLD2Y+uuFrgasvSiS +4qSKbpG2kk02R/DRTAglAyXI0rhYIDrYKCQPWqNMWpawT/FQQwbFjTuhmz10FyXS +hmVztZWoBwKBgQCBdnptLibghllGxViEoaai6gJ7Ib9ceHMEXPjDnb+wxPWoGZ8L +4AjWJ+EHXpAfqmVYTX5hL6VrOdSNAHIxftoUCiuUxwYVqesKMH6y/A9q4WjYfRi1 ++fyliJLjc2lPv9IwtfGGwh3uS5ObZTlCrWES+IFaP/YozHUQ9BPSdb+lxwKBgB35 +Lv9b3CqXw6why2EmKpkax/AeSjXnyoeOYT9HY8mgodMLtt0ovPbr/McSx+2PQmon +B8kJ7h+3hB4tHYZz+prH5MYIky1svNYwxeBu2ewL1k0u4cQTC+mHFeivNNczHTXs ++cASIf2O1IpZx3zxEirKk4/StLxPpimhlkVu7P8dAoGBAJVw2U70+PagVBPtvheu +ZDEvxSEzrn90ivIh7Y6ZIwdSOSLW04sOVL2JAzO155u4g77jdmcxV3urr1vD9LbF +qkBGLXx7FFC/Mn/H42qerxr16Bt6RtvVpms71UIQLYxA7caab9cqoyt0wkgqJFKX +fj0TVODnIf+zPMDCu+frpLbA +-----END PRIVATE KEY----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js new file mode 100644 
index 00000000000..e2ca646b63a --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js @@ -0,0 +1,202 @@ +// Merge the two options objects. Used as a helper when we are trying to actually compare options +// despite the fact that our test framework adds extra stuff to it. Anything set in the second +// options object overrides the first options object. The two objects must have the same structure. +function mergeOptions(obj1, obj2) { + var obj3 = {}; + for (var attrname in obj1) { + if (typeof obj1[attrname] === "object" && + typeof obj2[attrname] !== "undefined") { + if (typeof obj2[attrname] !== "object") { + throw "Objects being merged must have the same structure"; + } + obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]); + } + else { + obj3[attrname] = obj1[attrname]; + } + } + for (var attrname in obj2) { + if (typeof obj2[attrname] === "object" && + typeof obj1[attrname] !== "undefined") { + if (typeof obj1[attrname] !== "object") { + throw "Objects being merged must have the same structure"; + } + // Already handled above + } + else { + obj3[attrname] = obj2[attrname]; + } + } + return obj3; +} + +// Test that the parsed result of setting certain command line options has the correct format in +// mongod. See SERVER-13379. +// +// Arguments: +// mongoRunnerConfig - Configuration object to pass to the mongo runner +// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts" +// command, but with only the fields that should be set by the options implied by the first +// argument set. +// +// Example: +// +// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } }); +// +var getCmdLineOptsBaseMongod; +function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) { + + // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test + // framework without passing any additional options. 
We need this because the framework adds + // options of its own, and we only want to compare against the options we care about. + function getBaseOptsObject() { + + // Start mongod with no options + var baseMongod = MongoRunner.runMongod(); + + // Get base command line opts. Needed because the framework adds its own options + var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts"); + + // Stop the mongod we used to get the options + MongoRunner.stopMongod(baseMongod.port); + + return getCmdLineOptsBaseMongod; + } + + if (typeof getCmdLineOptsBaseMongod === "undefined") { + getCmdLineOptsBaseMongod = getBaseOptsObject(); + } + + // Get base command line opts. Needed because the framework adds its own options + var getCmdLineOptsExpected = getCmdLineOptsBaseMongod; + + // Delete port and dbPath if we are not explicitly setting them, since they will change on + // multiple runs of the test framework and cause false failures. + if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.net === "undefined" || + typeof expectedResult.parsed.net.port === "undefined") { + delete getCmdLineOptsExpected.parsed.net.port; + } + if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.storage === "undefined" || + typeof expectedResult.parsed.storage.dbPath === "undefined") { + delete getCmdLineOptsExpected.parsed.storage.dbPath; + } + + // Merge with the result that we expect + expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult); + + // Start mongod with options + var mongod = MongoRunner.runMongod(mongoRunnerConfig); + + // Get the parsed options + var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts"); + + // Delete port and dbPath if we are not explicitly setting them, since they will change on + // multiple runs of the test framework and cause false failures. 
+ if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.net === "undefined" || + typeof expectedResult.parsed.net.port === "undefined") { + delete getCmdLineOptsResult.parsed.net.port; + } + if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.storage === "undefined" || + typeof expectedResult.parsed.storage.dbPath === "undefined") { + delete getCmdLineOptsResult.parsed.storage.dbPath; + } + + // Make sure the options are equal to what we expect + assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed); + + // Cleanup + MongoRunner.stopMongod(mongod.port); +} + +// Test that the parsed result of setting certain command line options has the correct format in +// mongos. See SERVER-13379. +// +// Arguments: +// mongoRunnerConfig - Configuration object to pass to the mongo runner +// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts" +// command, but with only the fields that should be set by the options implied by the first +// argument set. +// +// Example: +// +// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } }); +// +var getCmdLineOptsBaseMongos; +function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) { + + // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test + // framework without passing any additional options. We need this because the framework adds + // options of its own, and we only want to compare against the options we care about. + function getBaseOptsObject() { + + // Start mongod with no options + var baseMongod = MongoRunner.runMongod(); + + // Start mongos with only the configdb option + var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host }); + + // Get base command line opts. 
Needed because the framework adds its own options + var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts"); + + // Remove the configdb option + delete getCmdLineOptsBaseMongos.parsed.sharding.configDB; + + // Stop the mongod and mongos we used to get the options + MongoRunner.stopMongos(baseMongos.port); + MongoRunner.stopMongod(baseMongod.port); + + return getCmdLineOptsBaseMongos; + } + + if (typeof getCmdLineOptsBaseMongos === "undefined") { + getCmdLineOptsBaseMongos = getBaseOptsObject(); + } + + // Get base command line opts. Needed because the framework adds its own options + var getCmdLineOptsExpected = getCmdLineOptsBaseMongos; + + // Delete port if we are not explicitly setting it, since it will change on multiple runs of the + // test framework and cause false failures. + if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.net === "undefined" || + typeof expectedResult.parsed.net.port === "undefined") { + delete getCmdLineOptsExpected.parsed.net.port; + } + + // Merge with the result that we expect + expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult); + + // Start mongod with no options + var mongod = MongoRunner.runMongod(); + + // Add configdb option + mongoRunnerConfig['configdb'] = mongod.host; + + // Start mongos connected to mongod + var mongos = MongoRunner.runMongos(mongoRunnerConfig); + + // Get the parsed options + var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts"); + + // Delete port if we are not explicitly setting it, since it will change on multiple runs of the + // test framework and cause false failures. 
+ if (typeof expectedResult.parsed === "undefined" || + typeof expectedResult.parsed.net === "undefined" || + typeof expectedResult.parsed.net.port === "undefined") { + delete getCmdLineOptsResult.parsed.net.port; + } + + // Remove the configdb option + delete getCmdLineOptsResult.parsed.sharding.configDB; + + // Make sure the options are equal to what we expect + assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed); + + // Cleanup + MongoRunner.stopMongos(mongos.port); + MongoRunner.stopMongod(mongod.port); +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini new file mode 100644 index 00000000000..4cfaf3395f6 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini @@ -0,0 +1 @@ +noscripting=false diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json new file mode 100644 index 00000000000..9f9cc84d107 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json @@ -0,0 +1,5 @@ +{ + "security" : { + "authorization" : "enabled" + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json new file mode 100644 index 00000000000..a0d4f8af1be --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json @@ -0,0 +1,5 @@ +{ + "sharding" : { + "autoSplit" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json new file mode 100644 index 00000000000..c87dabe125d --- /dev/null +++ 
b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json @@ -0,0 +1,7 @@ +{ + "net" : { + "http" : { + "enabled" : true + } + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json new file mode 100644 index 00000000000..362db08edd3 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json @@ -0,0 +1,5 @@ +{ + "storage" : { + "indexBuildRetry" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json new file mode 100644 index 00000000000..d75b94ccbc7 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json @@ -0,0 +1,7 @@ +{ + "storage" : { + "journal" : { + "enabled" : false + } + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json new file mode 100644 index 00000000000..b52be7382ed --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json @@ -0,0 +1,5 @@ +{ + "net" : { + "wireObjectCheck" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json new file mode 100644 index 00000000000..218646b1662 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json @@ -0,0 +1,5 @@ +{ + "sharding" : { + "archiveMovedChunks" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json new file mode 100644 index 00000000000..15ecefbb546 --- 
/dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json @@ -0,0 +1,5 @@ +{ + "storage" : { + "preallocDataFiles" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json new file mode 100644 index 00000000000..e8f32f2c23c --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json @@ -0,0 +1,5 @@ +{ + "security" : { + "javascriptEnabled" : true + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json new file mode 100644 index 00000000000..660d21eb17f --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json @@ -0,0 +1,7 @@ +{ + "net" : { + "unixDomainSocket" : { + "enabled" : true + } + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json new file mode 100644 index 00000000000..944f0de1575 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json @@ -0,0 +1,5 @@ +{ + "operationProfiling" : { + "mode" : "all" + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json new file mode 100644 index 00000000000..522ca2b766f --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json @@ -0,0 +1,5 @@ +{ + "replication" : { + "replSetName" : "myconfigname" + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json new file mode 100644 index 00000000000..71f92f122db 
--- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json @@ -0,0 +1,5 @@ +{ + "sharding" : { + "clusterRole" : "configsvr" + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json new file mode 100644 index 00000000000..47a1cce1b03 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json @@ -0,0 +1,5 @@ +{ + "systemLog" : { + "verbosity" : 5 + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem new file mode 100644 index 00000000000..dce0a0fb3f1 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV +BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx +MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0 +eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQ3NDFaFw00 +MDA0MjgxODQ3NDFaoA4wDDAKBgNVHRQEAwIBCzANBgkqhkiG9w0BAQUFAAOBgQAu +PlPDGei2q6kdkoHe8vmDuts7Hm/o9LFbBmn0XUcfHisCJCPsJTyGCsgnfIiBcXJY +1LMKsQFnYGv28rE2ZPpFg2qNxL+6qUEzCvqaHLX9q1V0F+f8hHDxucNYu52oo/h0 +uNZxB1KPFI2PReG5d3oUYqJ2+EctKkrGtxSPzbN0gg== +-----END X509 CRL----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem new file mode 100644 index 00000000000..85eeaff5543 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBujCCASMCAQEwDQYJKoZIhvcNAQEFBQAwgZIxCzAJBgNVBAYTAlVTMREwDwYD +VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwF +MTBHZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3Jp +dHkxGzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1cxcNMTMxMjA2MTUzMzUwWhcN 
+MTQwMTA1MTUzMzUwWjBMMBICAQwXDTEzMTIwNjE1MjczMFowGgIJAJGUg/wuW1KD +Fw0xMjEyMTIxODQ4MjJaMBoCCQCRlIP8LltShRcNMTIxMjEyMTg0ODUyWqAOMAww +CgYDVR0UBAMCAQ4wDQYJKoZIhvcNAQEFBQADgYEAERPfPdQnIafo1lYbFEx2ojrb +eYqvWN9ykTyUGq2bKv+STYiuaKUz6daGVjELjn/safn5wHkYr9+C/kRRoCor5HYw +N3uxHnkMpl6Xn7kgXL2b0jbdvfa44faOXdH2gbhzd8bFsOMra4QJHT6CgpYb3ei1 ++ePhAd1KS7tS/dyyP4c= +-----END X509 CRL----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem new file mode 100644 index 00000000000..88307503240 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV +BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx +MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0 +eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQwNTBaFw0x +MzAxMTExODQwNTBaoA4wDDAKBgNVHRQEAwIBAzANBgkqhkiG9w0BAQUFAAOBgQBs +jyvEdX8o0+PfRJsEv5oLwgp5y+YmKjRlXg2oj/ETxBDKNYtBY7B9Uu9q0chFtwTu +XMXeEFWuxnKG+4Ovp6JmNcCKkttUwsWQuR6dGpClW6ttTk0putAWtDnqukTPlEQ2 +XU3wco7ZgrTphvuGpaIQLM1sQg9x8SfW3q6/hxYm3A== +-----END X509 CRL----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal Binary files differnew file mode 100644 index 00000000000..687317844a7 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal Binary files differnew file mode 100644 index 00000000000..7dd98e2c97b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal 
b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal Binary files differnew file mode 100644 index 00000000000..d76790d2451 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/fts.js b/src/mongo/gotools/test/legacy26/jstests/libs/fts.js new file mode 100644 index 00000000000..73b7d339ba5 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/fts.js @@ -0,0 +1,18 @@ + +function queryIDS( coll, search, filter, extra ){ + var cmd = { search : search } + if ( filter ) + cmd.filter = filter; + if ( extra ) + Object.extend( cmd, extra ); + lastCommadResult = coll.runCommand( "text" , cmd); + + return getIDS( lastCommadResult ); +} + +function getIDS( commandResult ){ + if ( ! ( commandResult && commandResult.results ) ) + return [] + + return commandResult.results.map( function(z){ return z.obj._id; } ) +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/fun.js b/src/mongo/gotools/test/legacy26/jstests/libs/fun.js new file mode 100644 index 00000000000..276f32a8f40 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/fun.js @@ -0,0 +1,32 @@ +// General high-order functions + +function forEach (action, array) { + for (var i = 0; i < array.length; i++) + action (array[i]); +} + +function foldl (combine, base, array) { + for (var i = 0; i < array.length; i++) + base = combine (base, array[i]); + return base +} + +function foldr (combine, base, array) { + for (var i = array.length - 1; i >= 0; i--) + base = combine (array[i], base); + return base +} + +function map (func, array) { + var result = []; + for (var i = 0; i < array.length; i++) + result.push (func (array[i])); + return result +} + +function filter (pred, array) { + var result = [] + for (var i = 0; i < array.length; i++) + if (pred (array[i])) result.push (array[i]); + return result +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js 
b/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js new file mode 100644 index 00000000000..60cb7733f5d --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js @@ -0,0 +1,99 @@ +GeoNearRandomTest = function(name) { + this.name = name; + this.t = db[name]; + this.nPts = 0; + + // reset state + this.t.drop(); + Random.srand(1234); + + print("starting test: " + name); +} + + +GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){ + if(!indexBounds){ + scale = scale || 1; // scale is good for staying away from edges + return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; + } + else{ + var range = indexBounds.max - indexBounds.min; + var eps = Math.pow(2, -40); + // Go very close to the borders but not quite there. + return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; + } + +} + +GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) { + assert.eq(this.nPts, 0, "insertPoints already called"); + this.nPts = nPts; + + for (var i=0; i<nPts; i++){ + this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)}); + } + + if(!indexBounds) + this.t.ensureIndex({loc: '2d'}); + else + this.t.ensureIndex({loc: '2d'}, indexBounds) +} + +GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) { + for (var i=0; i < short.length; i++){ + + var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0] + var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1] + var dS = short[i].obj ? short[i].dis : 1 + + var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0] + var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1] + var dL = long[i].obj ? 
long[i].dis : 1 + + assert.eq([xS, yS, dS], [xL, yL, dL]); + } +} + +GeoNearRandomTest.prototype.testPt = function(pt, opts) { + assert.neq(this.nPts, 0, "insertPoints not yet called"); + + opts = opts || {}; + opts['sphere'] = opts['sphere'] || 0; + opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 ) + + print("testing point: " + tojson(pt) + " opts: " + tojson(opts)); + + + var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere}; + + var last = db.runCommand(cmd).results; + for (var i=2; i <= opts.nToTest; i++){ + //print(i); // uncomment to watch status + cmd.num = i + var ret = db.runCommand(cmd).results; + + try { + this.assertIsPrefix(last, ret); + } catch (e) { + print("*** failed while compairing " + (i-1) + " and " + i); + printjson(cmd); + throw e; // rethrow + } + + last = ret; + } + + + if (!opts.sharded){ + last = last.map(function(x){return x.obj}); + + var query = {loc:{}}; + query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt; + var near = this.t.find(query).limit(opts.nToTest).toArray(); + + this.assertIsPrefix(last, near); + assert.eq(last, near); + } +} + + diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/grid.js b/src/mongo/gotools/test/legacy26/jstests/libs/grid.js new file mode 100644 index 00000000000..3a1253d83cd --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/grid.js @@ -0,0 +1,171 @@ +// Grid infrastructure: Servers, ReplicaSets, ConfigSets, Shards, Routers (mongos). Convenient objects and functions on top of those in shell/servers.js -Tony + +load('jstests/libs/fun.js') +load('jstests/libs/network.js') + +// New servers and routers take and increment port number from this. +// A comment containing FreshPorts monad implies reading and incrementing this, IO may also read/increment this. +var nextPort = 31000 + +/*** Server is the spec of a mongod, ie. all its command line options. 
+ To start a server call 'begin' ***/ +// new Server :: String -> FreshPorts Server +function Server (name) { + this.addr = '127.0.0.1'; + this.dirname = name + nextPort; + this.args = { port : nextPort++, + noprealloc : '', + smallfiles : '', + rest : '', + oplogSize : 8 } +} + +// Server -> String <addr:port> +Server.prototype.host = function() { + return this.addr + ':' + this.args.port +} + +// Start a new server with this spec and return connection to it +// Server -> IO Connection +Server.prototype.begin = function() { + return startMongodTest(this.args.port, this.dirname, false, this.args); +} + +// Stop server and remove db directory +// Server -> IO () +Server.prototype.end = function() { + print('Stopping mongod on port ' + this.args.port) + stopMongod (this.args.port) + resetDbpath (MongoRunner.dataPath + this.dirname) +} + +// Cut server from network so it is unreachable (but still alive) +// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux. +function cutServer (conn) { + var addrport = parseHost (conn.host) + cutNetwork (addrport.port) +} + +// Ensure server is connected to network (undo cutServer) +// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux. +function uncutServer (conn) { + var iport = parseHost (conn.host) + restoreNetwork (iport.port) +} + +// Kill server process at other end of this connection +function killServer (conn, _signal) { + var signal = _signal || 15 + var iport = parseHost (conn.host) + stopMongod (iport.port, signal) +} + +/*** ReplicaSet is the spec of a replica set, ie. options given to ReplicaSetTest. 
+ To start a replica set call 'begin' ***/ +// new ReplicaSet :: String -> Int -> FreshPorts ReplicaSet +function ReplicaSet (name, numServers) { + this.name = name + this.host = '127.0.0.1' + this.nodes = numServers + this.startPort = nextPort + this.oplogSize = 40 + nextPort += numServers +} + +// Start a replica set with this spec and return ReplSetTest, which hold connections to the servers including the master server. Call ReplicaSetTest.stopSet() to end all servers +// ReplicaSet -> IO ReplicaSetTest +ReplicaSet.prototype.begin = function() { + var rs = new ReplSetTest(this) + rs.startSet() + rs.initiate() + rs.awaitReplication() + return rs +} + +// Create a new server and add it to replica set +// ReplicaSetTest -> IO Connection +ReplSetTest.prototype.addServer = function() { + var conn = this.add() + nextPort++ + this.reInitiate() + this.awaitReplication(60000) + assert.soon(function() { + var doc = conn.getDB('admin').isMaster() + return doc['ismaster'] || doc['secondary'] + }) + return conn +} + +/*** ConfigSet is a set of specs (Servers) for sharding config servers. + Supply either the servers or the number of servers desired. + To start the config servers call 'begin' ***/ +// new ConfigSet :: [Server] or Int -> FreshPorts ConfigSet +function ConfigSet (configSvrsOrNumSvrs) { + if (typeof configSvrsOrNumSvrs == 'number') { + this.configSvrs = [] + for (var i = 0; i < configSvrsOrNumSvrs; i++) + this.configSvrs.push (new Server ('config')) + } else + this.configSvrs = configSvrs +} + +// Start config servers, return list of connections to them +// ConfigSet -> IO [Connection] +ConfigSet.prototype.begin = function() { + return map (function(s) {return s.begin()}, this.configSvrs) +} + +// Stop config servers +// ConfigSet -> IO () +ConfigSet.prototype.end = function() { + return map (function(s) {return s.end()}, this.configSvrs) +} + +/*** Router is the spec for a mongos, ie, its command line options. 
+ To start a router (mongos) call 'begin' ***/ +// new Router :: ConfigSet -> FreshPorts Router +function Router (configSet) { + this.args = { port : nextPort++, + v : 0, + configdb : map (function(s) {return s.host()}, configSet.configSvrs) .join(','), + chunkSize : 1} +} + +// Start router (mongos) with this spec and return connection to it. +// Router -> IO Connection +Router.prototype.begin = function() { + return startMongos (this.args); +} + +// Stop router +// Router -> IO () +Router.prototype.end = function() { + return stopMongoProgram (this.args.port) +} + +// Add shard to config via router (mongos) connection. Shard is either a replSet name (replSet.getURL()) or single server (server.host) +// Connection -> String -> IO () +function addShard (routerConn, repSetOrHostName) { + var ack = routerConn.getDB('admin').runCommand ({addshard: repSetOrHostName}) + assert (ack['ok'], tojson(ack)) +} + +// Connection -> String -> IO () +function enableSharding (routerConn, dbName) { + var ack = routerConn.getDB('admin').runCommand ({enablesharding: dbName}) + assert (ack['ok'], tojson(ack)) +} + +// Connection -> String -> String -> String -> IO () +function shardCollection (routerConn, dbName, collName, shardKey) { + var ack = routerConn.getDB('admin').runCommand ({shardcollection: dbName + '.' + collName, key: shardKey}) + assert (ack['ok'], tojson(ack)) +} + +// Move db from its current primary shard to given shard. 
Shard is either a replSet name (replSet.getURL()) or single server (server.host) +// Connection -> String -> String -> IO () +function moveDB (routerConn, dbname, repSetOrHostName) { + var ack = routerConn.getDB('admin').runCommand ({moveprimary: dbname, to: repSetOrHostName}) + printjson(ack) + assert (ack['ok'], tojson(ack)) +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/key1 b/src/mongo/gotools/test/legacy26/jstests/libs/key1 new file mode 100644 index 00000000000..b5c19e4092f --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/key1 @@ -0,0 +1 @@ +foop de doop diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/key2 b/src/mongo/gotools/test/legacy26/jstests/libs/key2 new file mode 100644 index 00000000000..cbde8212841 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/key2 @@ -0,0 +1 @@ +other key diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem new file mode 100644 index 00000000000..e181139b5d9 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem @@ -0,0 +1,101 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 8 (0x8) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus + Validity + Not Before: Nov 6 14:31:58 2013 GMT + Not After : Mar 23 14:31:58 2041 GMT + Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=127.0.0.1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:aa:e1:a0:6c:09:dc:fd:d0:9f:0f:b6:77:40:60: + f9:01:f9:9e:55:20:fe:88:04:93:c9:ab:96:93:3a: + ed:7e:7d:ad:e4:eb:a7:e9:07:35:ef:6e:14:64:dd: + 31:9b:e5:24:06:18:bb:60:67:e3:c5:49:8e:79:b6: + 78:07:c1:64:3f:de:c1:7d:1b:a9:96:35:d5:f9:b8: + b4:5e:2a:34:b7:d0:19:ad:f6:8a:00:ef:8e:b0:d5: + 36:1f:66:a0:7a:7d:cf:f0:98:3c:ee:0f:be:67:d2: + 
de:c3:e6:b8:79:2f:64:40:0c:39:15:97:8c:13:da: + 1b:db:5c:bb:a3:43:0b:74:c7:46:55:9b:ea:d7:93: + d5:15:2f:d1:34:ac:a9:99:3b:01:f0:c1:d7:42:89: + 24:bb:ab:60:99:c1:4d:9f:bf:9a:a3:92:3a:58:05: + e2:47:a6:8e:71:b2:0a:32:b0:c5:cc:a0:58:40:bf: + 09:a7:76:f5:37:ce:90:71:e0:75:89:17:ea:fb:80: + 24:a1:9d:6e:1b:7e:e3:44:52:d3:fe:e3:de:80:9a: + 8e:c3:4f:8c:bb:b4:8c:d2:a9:a9:aa:af:90:ac:b4: + ee:6b:d2:c5:71:1e:08:7f:4c:b6:2a:5f:13:7a:e3: + 29:f7:2e:bb:f7:c5:48:0a:4e:2e:1e:d4:2c:40:b3: + 4c:19 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + 0E:3F:54:C4:77:85:FF:93:58:A7:24:23:32:35:73:B0:BE:8C:C3:BB + X509v3 Authority Key Identifier: + keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16 + + Signature Algorithm: sha1WithRSAEncryption + 4c:9d:31:81:b5:e9:6a:64:4c:1e:eb:91:7f:f1:66:74:46:13: + 19:cb:f2:3b:9a:41:f2:83:67:32:53:a6:cd:33:37:4c:92:a6: + 36:d4:f3:0b:56:a2:2b:66:f1:09:a7:06:36:b8:83:b7:31:70: + fe:bf:af:b5:3d:59:f3:f2:18:48:c7:6c:b0:90:8c:24:47:30: + 53:8d:c5:3e:7c:7b:33:53:15:ec:bd:8a:83:ed:05:e8:8b:21: + d7:65:39:69:95:c8:58:7d:4f:1b:32:51:85:2d:4d:8b:be:00: + 60:17:83:9b:2b:13:43:05:78:db:a4:2e:a2:cb:31:34:7e:b9: + 8a:72 +-----BEGIN CERTIFICATE----- +MIIDZDCCAs2gAwIBAgIBCDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0 +MzE1OFoXDTQxMDMyMzE0MzE1OFowXDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjES +MBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAquGgbAnc/dCfD7Z3QGD5AfmeVSD+iASTyauWkzrtfn2t5Oun6Qc1724UZN0x +m+UkBhi7YGfjxUmOebZ4B8FkP97BfRupljXV+bi0Xio0t9AZrfaKAO+OsNU2H2ag +en3P8Jg87g++Z9Lew+a4eS9kQAw5FZeME9ob21y7o0MLdMdGVZvq15PVFS/RNKyp 
+mTsB8MHXQokku6tgmcFNn7+ao5I6WAXiR6aOcbIKMrDFzKBYQL8Jp3b1N86QceB1 +iRfq+4AkoZ1uG37jRFLT/uPegJqOw0+Mu7SM0qmpqq+QrLTua9LFcR4If0y2Kl8T +euMp9y6798VICk4uHtQsQLNMGQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG ++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU +Dj9UxHeF/5NYpyQjMjVzsL6Mw7swHwYDVR0jBBgwFoAUB0EZOp9+xbciTre81d/k +/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEATJ0xgbXpamRMHuuRf/FmdEYTGcvyO5pB +8oNnMlOmzTM3TJKmNtTzC1aiK2bxCacGNriDtzFw/r+vtT1Z8/IYSMdssJCMJEcw +U43FPnx7M1MV7L2Kg+0F6Ish12U5aZXIWH1PGzJRhS1Ni74AYBeDmysTQwV426Qu +ossxNH65inI= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCq4aBsCdz90J8P +tndAYPkB+Z5VIP6IBJPJq5aTOu1+fa3k66fpBzXvbhRk3TGb5SQGGLtgZ+PFSY55 +tngHwWQ/3sF9G6mWNdX5uLReKjS30Bmt9ooA746w1TYfZqB6fc/wmDzuD75n0t7D +5rh5L2RADDkVl4wT2hvbXLujQwt0x0ZVm+rXk9UVL9E0rKmZOwHwwddCiSS7q2CZ +wU2fv5qjkjpYBeJHpo5xsgoysMXMoFhAvwmndvU3zpBx4HWJF+r7gCShnW4bfuNE +UtP+496Amo7DT4y7tIzSqamqr5CstO5r0sVxHgh/TLYqXxN64yn3Lrv3xUgKTi4e +1CxAs0wZAgMBAAECggEADtdh04BXzUOdTQQP/2tstRs1ATfIY4/iNhXNEiSAFAhe +Xg+Jmdeie5UX+FqtwFh6dH0ZaRoc0jm9Qhzy99l4F4QFUhRg+kbausGsCLGpun08 +fbt36PTlc75Q4RFMxta+hKr0P8jmRKYv6tvTEdNn5ZgqLRHofKDo4nh/Y4KjMBUq +VIMUu+VO9Ol2GPlZVRBaJec0E1+HUyzaK5JVUIFh4atcrHyXxae+rY9o6G57BBEj +ZzlahfMI5aYj9HhXnB8RuhVBuIZBNSA41nxHmOs6JBQsatVML51RFIV4KPU+AyDR +bdYXHJehRIUF8RL92aHjGYsvXdSxVhuUBqMIQhOwAQKBgQDUtj+p+7SHpLyQIZpU +EQFK+42LDc6zF4uJVjq1d8fC2Hrmz8PLs0KcH36VWNbo48B3iFiPWIMID5xwLuIb +FkLOzJ8QrbILn0zcu/hplrCiy6PZas3rpLJ+X406wLQeCikOLhQkz+cuKuQmvWkK +eyqwBIIxg8t5dTtTAmu3w/DDgQKBgQDNqByxKduTgEND1+isUOt+L/ipR3SzXQ4m +ZsOKiSxyXxge0/CUxPxO6WeEVGQ7bGAr5yQD9ukvJnCo3phYcuRRj+RTMrTL73Kz +p/cyOUx2NMUIgURTsO+s3D0lC4+NmoDge0roeEDX+/lFNjqgRKJ+1LUimqbo5uNE +EupkyTh0mQKBgGw/81ZGSjFdnLic4TU3Ejlem0HQ3Qg3S0OxJl+DfZ2jHaiowzO/ +Hn7laD4I4BXVEfXC5Y7NtKE9kJdmxJqUUZt8dta+DoXro+oRnvHdRjcS+2eB+xmY +z12QswbbWs6OzSXyPT4er7/HBCTS78nttGOvZ7JbKAm/p1kvOjJi/PwBAoGAE7Tw +Sum/6Lp5t56Q5TI73rOqGE6ImEdqe7ONOVE7uRnzrcCRZTAbHVSwXrXXhPo1nP9h 
+LCAU6De+w+/QmWkpB8fKEU7ilEg1rZGC1oU3FnyoBNCeQ4bI8L+J/GrHLsKHZvtp +ii07yXaTxFYV+BWbnJu1X8OCCv9U98j4PQArMMECgYEAm6uLN647vb+ZhzNBMtsX +1wnMSgzbgGpgjhWwk6dNmw8YJNKg9CFa8sQ8N7yKXWBEF/RkU0kfzZL8iddHEb/k +Ti1BlwrEzFfIQLlBfv47tYWOj8ZxN0ujlzUoN2VAC25LZhjcQCo3ftBk2lkrmllu +MxjxBfRk/teUdRl80oi5R0w= +-----END PRIVATE KEY----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem new file mode 100644 index 00000000000..beb0bb91b61 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem @@ -0,0 +1,100 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 9 (0x9) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus + Validity + Not Before: Nov 6 14:45:13 2013 GMT + Not After : Mar 23 14:45:13 2041 GMT + Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=santesthostname.com + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:c9:83:7a:75:42:cf:35:a4:95:c7:c8:d8:4d:19: + 0e:89:87:d5:bd:f9:2f:ee:20:2c:4c:ca:6d:0b:c1: + 10:5b:06:1b:c4:a1:26:12:25:06:7a:1e:d1:e6:d0: + 91:2b:a3:c8:74:de:95:10:d9:ff:20:03:ec:84:db: + 49:d9:a4:e9:c2:93:f0:d2:32:01:a6:55:db:14:bf: + 16:fe:88:e0:e4:46:0f:6a:bd:27:95:45:2e:8d:13: + e2:99:09:74:e4:2b:32:c3:6d:61:0c:86:85:eb:12: + f5:dc:9e:7b:d3:00:a3:ce:f4:8a:4b:51:7f:a2:c6: + 0b:52:a4:f1:41:d5:01:53:88:99:b9:3b:29:f8:43: + 5e:a4:c7:41:d9:d3:34:43:f2:c7:a6:8d:22:1c:f9: + b2:63:cb:df:83:9c:6f:ec:e3:b0:63:af:0b:51:c9: + 20:ca:c2:59:c1:2c:ec:de:37:18:76:3d:73:85:82: + 12:11:cd:b6:ef:2f:7b:64:cd:a3:2d:f6:7a:54:7f: + b3:4f:c9:38:f4:62:b6:da:00:f0:59:df:e1:d3:15: + ca:4b:73:6c:22:c1:9a:c1:51:c4:28:59:0f:71:2a: + 39:e9:17:08:9d:b0:88:61:a7:53:67:da:dc:fb:6e: + 38:f7:a8:cd:cd:88:ed:d9:4c:88:f4:a4:75:5e:3f: + 8b:ff + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic 
Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + OpenSSL Certificate for SSL Server + X509v3 Subject Alternative Name: + DNS:*.example.com, DNS:127.0.0.1, DNS:morefun!, IP Address:154.2.2.3, email:user@host.com + Signature Algorithm: sha1WithRSAEncryption + 0b:82:c6:7d:e0:ba:71:24:d6:a8:f4:cb:6f:0f:f6:69:28:32: + 98:81:e6:14:49:81:07:ff:92:dd:0a:a4:68:3c:92:00:e5:8c: + 43:d1:29:04:4a:5e:f2:b1:db:d2:ca:5d:7d:fc:fe:7b:f5:01: + 65:87:25:cd:4c:68:09:16:bd:c7:b0:a4:d2:89:5e:dd:92:44: + 6c:6e:7a:fe:7e:05:e2:2b:56:96:96:16:44:4a:01:87:8f:0c: + df:35:88:97:3e:e5:21:23:a2:af:87:ad:ee:f7:9e:05:36:f7: + 96:88:c8:fa:92:33:c2:60:2e:14:d9:ea:34:ab:04:a6:78:04: + be:da +-----BEGIN CERTIFICATE----- +MIIDjDCCAvWgAwIBAgIBCTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0 +NDUxM1oXDTQxMDMyMzE0NDUxM1owZjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEc +MBoGA1UEAwwTc2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMmDenVCzzWklcfI2E0ZDomH1b35L+4gLEzKbQvBEFsGG8Sh +JhIlBnoe0ebQkSujyHTelRDZ/yAD7ITbSdmk6cKT8NIyAaZV2xS/Fv6I4ORGD2q9 +J5VFLo0T4pkJdOQrMsNtYQyGhesS9dyee9MAo870iktRf6LGC1Kk8UHVAVOImbk7 +KfhDXqTHQdnTNEPyx6aNIhz5smPL34Ocb+zjsGOvC1HJIMrCWcEs7N43GHY9c4WC +EhHNtu8ve2TNoy32elR/s0/JOPRittoA8Fnf4dMVyktzbCLBmsFRxChZD3EqOekX +CJ2wiGGnU2fa3PtuOPeozc2I7dlMiPSkdV4/i/8CAwEAAaOBmDCBlTAJBgNVHRME +AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiT3BlblNTTCBD +ZXJ0aWZpY2F0ZSBmb3IgU1NMIFNlcnZlcjBCBgNVHREEOzA5gg0qLmV4YW1wbGUu +Y29tggkxMjcuMC4wLjGCCG1vcmVmdW4hhwSaAgIDgQ11c2VyQGhvc3QuY29tMA0G +CSqGSIb3DQEBBQUAA4GBAAuCxn3gunEk1qj0y28P9mkoMpiB5hRJgQf/kt0KpGg8 +kgDljEPRKQRKXvKx29LKXX38/nv1AWWHJc1MaAkWvcewpNKJXt2SRGxuev5+BeIr +VpaWFkRKAYePDN81iJc+5SEjoq+Hre73ngU295aIyPqSM8JgLhTZ6jSrBKZ4BL7a +-----END 
CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJg3p1Qs81pJXH +yNhNGQ6Jh9W9+S/uICxMym0LwRBbBhvEoSYSJQZ6HtHm0JEro8h03pUQ2f8gA+yE +20nZpOnCk/DSMgGmVdsUvxb+iODkRg9qvSeVRS6NE+KZCXTkKzLDbWEMhoXrEvXc +nnvTAKPO9IpLUX+ixgtSpPFB1QFTiJm5Oyn4Q16kx0HZ0zRD8semjSIc+bJjy9+D +nG/s47BjrwtRySDKwlnBLOzeNxh2PXOFghIRzbbvL3tkzaMt9npUf7NPyTj0Yrba +APBZ3+HTFcpLc2wiwZrBUcQoWQ9xKjnpFwidsIhhp1Nn2tz7bjj3qM3NiO3ZTIj0 +pHVeP4v/AgMBAAECggEAbaQ12ttQ9rToMd2bosdBW58mssiERaIHuHhjQIP5LC10 +qlWr6y9uCMAAIP/WHNJuXPhGTvbtkzPPWrIdymeqMI5h91vx/di07OLT1gYPpuRf +uwnUIamUnHn3TqEQkpzWb/JxXWlMMA0O7MzmPnYYqp/vJu/e7Geo/Xx1MAZ/RD0U +YUvrjAyHcor01VVa/eV69jL+6x9ExFNmRYRbmjmK/f10R4o86nIfqhXbM8qKsT6x +1U/S2I4oModm0x12PgiMDMDzVD+cNE/h8lSnFtBTNEY3xRe7CZnhMV4nBVGjWi9D +XjcIBA0kGd4G10ploiF+37J/PQbyodLA/Y30BIYCkQKBgQD6XvEzd4DbBa08pcCa +CYZd5pyAHur1GzJ4rTQNqB84hzuyG6dKkk0rPXjExrj/GAtGWg2ohggmC5OPInKM +WdpMC56Q0aZYMId3Be/Wg4kRgFO0YOsrx0dRVi5nwbRXkMjXbfewSopwbzP5hIo1 +7rfOhdhbjXx6W269FPE4Epmj1QKBgQDOC1QjGeEzwEgSq3LuojRLHFo31pWYr7UU +sxhpoWMB6ImPMVjXaEsRKfc7Gulpee1KVQLVmzbkqrHArVNXEpuG4egRwZ10UJ0L +v4PqrElyHKxgAvllflkkMSX4rx791T+AZMq6W5VX1fKiojfvSLzmEFaI6VmS43GZ +KCz9RFbegwKBgHSE4vP01b8YsTrcWPpXHHVu8b6epPJVKfQHh4YjjAQey6VkQULv +O4K4JRBO+6GcawLeviSD3B74nD+s5Gp1Fqb1cWIsb6HzU9gMp0XKCWxfsJTt1gSV +xZcQ6J/ZAjkOZKn9v5wH1M3msuWYzUm0Q06V888H1bqL+sl8iZZy8ZXRAoGBALf6 +GZh2BUYGTNSOzkMSBouCt3PgYRdC3PesqwG2nwcXMazwLRm6AD1FMYJPF1edDSow +GiXNQAiR+cHHggDflourr2IbdZJkYLYavZmPWM1RmQDp5vKfDM1qLTOOeqe//8GP +Pg2EtScG3G4nVraMRk9PC1WYtuiXudk9rF5A5SgtAoGBAL1oVSnQpi5tzBNJqhzM +mQIF7ct5WNj2b1lKqqsXUTd2pcgMCRrryatqH+gLz1rAjtbVfx2FAYkutH5TFgqP +c4uomUH3so1EjEA8GtFS9SSkLn5nIr4TnVy4+Qsr1svOo8mhtztORXz+xOTxR6ud +p7rd/YEbc5GhNSXlcW+apZW+ +-----END PRIVATE KEY----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf b/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf new file mode 100644 index 00000000000..0f004f2de8a --- /dev/null +++ 
b/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf @@ -0,0 +1,13 @@ +[libdefaults] + default_realm = 10GEN.ME + +[realms] + 10GEN.ME = { + kdc = kdc.10gen.me + admin_server = kdc.10gen.me + default_domain = 10gen.me + } + +[domain_realm] + .10gen.me = 10GEN.ME + 10gen.me = 10GEN.ME diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab b/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab Binary files differnew file mode 100644 index 00000000000..3529d5fcbc6 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab b/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab Binary files differnew file mode 100644 index 00000000000..35fd2ff06e7 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/network.js b/src/mongo/gotools/test/legacy26/jstests/libs/network.js new file mode 100644 index 00000000000..e5b33f3219e --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/network.js @@ -0,0 +1,37 @@ + +// Parse "127.0.0.1:300" into {addr: "127.0.0.1", port: 300}, +// and "127.0.0.1" into {addr: "127.0.0.1", port: undefined} +function parseHost (hostString) { + var items = hostString.match(/(\d+.\d+.\d+.\d+)(:(\d+))?/) + return {addr: items[1], port: parseInt(items[3])} +} + + +/* Network traffic shaping (packet dropping) to simulate network problems + Currently works on BSD Unix and Mac OS X only (using ipfw). + Requires sudo access. + TODO: make it work on Linux too (using iptables). 
*/ + +var nextRuleNum = 100 // this grows indefinitely but can't exceed 65534, so can't call routines below indefinitely +var portRuleNum = {} + +// Cut network connection to local port by dropping packets using iptables +function cutNetwork (port) { + portRuleNum[port] = nextRuleNum + runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any to any ' + port) + runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any ' + port + ' to any') + //TODO: confirm it worked (since sudo may not work) + runProgram ('sudo', 'ipfw', 'show') +} + +// Restore network connection to local port by not dropping packets using iptables +function restoreNetwork (port) { + var ruleNum = portRuleNum[port] + if (ruleNum) { + runProgram ('sudo', 'ipfw', 'delete ' + ruleNum++) + runProgram ('sudo', 'ipfw', 'delete ' + ruleNum) + delete portRuleNum[port] + } + //TODO: confirm it worked (since sudo may not work) + runProgram ('sudo', 'ipfw', 'show') +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js b/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js new file mode 100644 index 00000000000..d5cb5346abe --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js @@ -0,0 +1,259 @@ +/** + * The ParallelTester class is used to test more than one test concurrently + */ + + +if ( typeof _threadInject != "undefined" ){ + //print( "fork() available!" 
); + + Thread = function(){ + this.init.apply( this, arguments ); + } + _threadInject( Thread.prototype ); + + ScopedThread = function() { + this.init.apply( this, arguments ); + } + ScopedThread.prototype = new Thread( function() {} ); + _scopedThreadInject( ScopedThread.prototype ); + + fork = function() { + var t = new Thread( function() {} ); + Thread.apply( t, arguments ); + return t; + } + + // Helper class to generate a list of events which may be executed by a ParallelTester + EventGenerator = function( me, collectionName, mean, host ) { + this.mean = mean; + if (host == undefined) host = db.getMongo().host; + this.events = new Array( me, collectionName, host ); + } + + EventGenerator.prototype._add = function( action ) { + this.events.push( [ Random.genExp( this.mean ), action ] ); + } + + EventGenerator.prototype.addInsert = function( obj ) { + this._add( "t.insert( " + tojson( obj ) + " )" ); + } + + EventGenerator.prototype.addRemove = function( obj ) { + this._add( "t.remove( " + tojson( obj ) + " )" ); + } + + EventGenerator.prototype.addUpdate = function( objOld, objNew ) { + this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" ); + } + + EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) { + query = query || {}; + shouldPrint = shouldPrint || false; + checkQuery = checkQuery || false; + var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );" + if ( checkQuery ) { + action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );" + } + if ( shouldPrint ) { + action += " print( me + ' ' + " + count + " );"; + } + this._add( action ); + } + + EventGenerator.prototype.getEvents = function() { + return this.events; + } + + EventGenerator.dispatch = function() { + var args = argumentsToArray( arguments ); + var me = args.shift(); + var collectionName = args.shift(); + var host = args.shift(); + var m = new Mongo( host ); + var t = m.getDB( 
"test" )[ collectionName ]; + for( var i in args ) { + sleep( args[ i ][ 0 ] ); + eval( args[ i ][ 1 ] ); + } + } + + // Helper class for running tests in parallel. It assembles a set of tests + // and then calls assert.parallelests to run them. + ParallelTester = function() { + assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode") + this.params = new Array(); + } + + ParallelTester.prototype.add = function( fun, args ) { + args = args || []; + args.unshift( fun ); + this.params.push( args ); + } + + ParallelTester.prototype.run = function( msg, newScopes ) { + newScopes = newScopes || false; + assert.parallelTests( this.params, msg, newScopes ); + } + + // creates lists of tests from jstests dir in a format suitable for use by + // ParallelTester.fileTester. The lists will be in random order. + // n: number of lists to split these tests into + ParallelTester.createJstestsLists = function( n ) { + var params = new Array(); + for( var i = 0; i < n; ++i ) { + params.push( [] ); + } + + var makeKeys = function( a ) { + var ret = {}; + for( var i in a ) { + ret[ a[ i ] ] = 1; + } + return ret; + } + + // some tests can't run in parallel with most others + var skipTests = makeKeys([ "dbadmin.js", + "repair.js", + "cursor8.js", + "recstore.js", + "extent.js", + "indexb.js", + + // tests turn on profiling + "profile1.js", + "profile3.js", + "profile4.js", + "profile5.js", + + "mr_drop.js", + "mr3.js", + "indexh.js", + "apitest_db.js", + "evalb.js", + "evald.js", + "evalf.js", + "killop.js", + "run_program1.js", + "notablescan.js", + "drop2.js", + "dropdb_race.js", + "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed. 
+ "bench_test1.js", + "padding.js", + "queryoptimizera.js", + "loglong.js",// log might overflow before + // this has a chance to see the message + "connections_opened.js", // counts connections, globally + "opcounters.js", + "currentop.js", // SERVER-8673, plus rwlock yielding issues + "set_param1.js", // changes global state + "geo_update_btree2.js", // SERVER-11132 test disables table scans + "update_setOnInsert.js", // SERVER-9982 + ] ); + + var parallelFilesDir = "jstests/core"; + + // some tests can't be run in parallel with each other + var serialTestsArr = [ parallelFilesDir + "/fsync.js", + parallelFilesDir + "/auth1.js", + + // These tests expect the profiler to be on or off at specific points + // during the test run. + parallelFilesDir + "/cursor6.js", + parallelFilesDir + "/profile2.js", + parallelFilesDir + "/updatee.js" + ]; + var serialTests = makeKeys( serialTestsArr ); + + // prefix the first thread with the serialTests + // (which we will exclude from the rest of the threads below) + params[ 0 ] = serialTestsArr; + var files = listFiles( parallelFilesDir ); + files = Array.shuffle( files ); + + var i = 0; + files.forEach( + function(x) { + if ( ( /[\/\\]_/.test(x.name) ) || + ( ! /\.js$/.test(x.name) ) || + ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || // + ( x.name in serialTests )) { + print(" >>>>>>>>>>>>>>> skipping " + x.name); + return; + } + // add the test to run in one of the threads. 
// Runs a set of test files sequentially, timing each one.
// First argument is an identifier for this tester; remaining arguments are
// the file names to load. Intended as the worker body for ParallelTester.
ParallelTester.fileTester = function() {
    var fileArgs = argumentsToArray( arguments );
    var suite = fileArgs.shift();
    for ( var idx = 0; idx < fileArgs.length; ++idx ) {
        var fileName = fileArgs[ idx ];
        print(" S" + suite + " Test : " + fileName + " ...");
        var elapsed = Date.timeFunc( function() { load(fileName); }, 1);
        print(" S" + suite + " Test : " + fileName + " " + elapsed + "ms" );
    }
}
are joined (SERVER-529) + runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } ); + assert.eq( 0, nFailed, msg ); + } +} diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem b/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem new file mode 100644 index 00000000000..87976e7a574 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem @@ -0,0 +1,51 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIgWTIkEmBBfoCAggA +MBQGCCqGSIb3DQMHBAjzL6xrCrEygwSCBMihG8kg3nTnTtWAbB+d1D+HJxriqm37 +7rwjkfa+T5w5ZBRGpsTt3QB5ep0maX72H55ns6ukkeMoDBSadhDWrGWcLQ2IOGt3 +E14KU6vMFe3gQkfF1fupp7F+3ma58/VNUKa4X5pzZ7OCf8inlLWejp8BRqbrPWqw +Errgw1kNN3gWfQMr7JtIt1yI1xIMEB2Z976Jn0gaGnJAtzIW4thqjkDdb8b33S9f +cb7N1Fq4cly22f9HdqNcLgVTi1zIlPXc/f/6mtsGTsJv/rMPthJ7c3Smvh3Fce2G +w8e+ypfey+9QG3fk7RslaFRe8ShgqfdR8CAalp2UzwNbX91Agyuim3TA6s4jM8N9 +cF6CXlqEaA4sKhiOJmw69DfTC7QRee/gi2A8bz17pX85nKrGiLYn+Od8CEhTFxVk +lNgBLv4+RcYHVqxWlbJMdDliMN53E+hYbh0y+GDLjteEXbrxRo1aSgd/9PGiSl97 +KY4F7b/OwRzRZh1F+cXY+uP5ZQMbx5EMMkhzuj3Hiy/AVlQrW2B1lXtcf11YFFJj +xWq6YcpmEjL+xRq1PgoU7ahl6K0A3ScedQA5b1rLdPE8+bkRAfoN+0r8HVkIL7M+ +PorrwuWnvUmovZ0yDvm153HVvRnKZKHcelklphuUWfXvcRNITG/Rx6ssj+MVjqjb +Xy7t7wgIrk10TFWNEcunGjSSjPDkjYPazJ2dasI0rODzhlQzrnlWM+El9P5zSu2z +1Bvet44nmAKi2WLMda5YKbJcLSNbpBFB+rTwDt/D+dfwsJeC0sjpzzatKGXNJLJQ +7x9BZfAbBn0QrIZYGMkaxWvcpJcaVUbCKiST4DK5ze584ptrlH+Bqw4u4xLcVrdk +hu/8IBNybLrl4zahIz7bRRNmw5wo9zUVXPXEtuYak+MK+gmD3TzJ12OUKAlAj3Go +Fj3NFQoxBJJjuXM3zZRvHp+/AAOUANBYIyV2WssF6C+SH4o+jKyxWC/GawPFvx/B +gy55kdEt+ORdcOfV8L5Q2xI8Qpck6E3odmaHCvjz1bUVUWqhJcTuoewHRBfWiWgc +UCXBS/YgendUQroBOPyYIwTtk4XY9fhhKGI4LhWcx4LfzntBnM9FGmDOwhu3HqEd +HOs8p+HhB8LPjGRot63m7gkJ1T6AswSi9hTeZeSgXuSgL23zqwPGbGTwO3AmFs/M +8luXQ4My9bk74K3d9lFdJPaxeTpeeWNodnBItbioT5aImptU+pkKWLTVmXi4V+JE +1ootg+DSbz+bKp4A/LLOBO4Rsx5FCGAbBMnKc/n8lF86LjKq2PLRfgdPCaVfBrcd 
+TnOkBZYU0HwJAc++4AZQJvA/KRB4UPUzMe2atjVxcrr6r6vL8G04+7TBFoynpzJ+ +4KZPCJz0Avb4wYKu/IHkdKL7UY8WEGz1mMDbAu4/xCriLg49D2f1eY3FTEjBotBI +J9hE4ccmwqlxtl4qCVRezh0C+viJ6q2tCji2SPQviaVMNWiis9cZ52J+F9TC2p9R +PdatJg0rjuVzfoPFE8Rq8V6+zf818b19vQ4F31J+VXTz7sF8it9IO0w/3MbtfBNE +pKmMZ9h5RdSw1kXRWXbROR9XItS7gE1wkXAxw11z7jqNSNvhotkJXH/A5qGpTFBl +Z8A= +-----END ENCRYPTED PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDczCCAtygAwIBAgIBCzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1 +MTgxMFoXDTQxMDQyMjE1MTgxMFowazELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP +MA0GA1UECwwGS2VybmVsMRAwDgYDVQQDDAdsYXphcnVzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA0+uq+UcogTSS+BLNTwwsBU7/HnNNhNgLKnk8pdUC +UFOzAjXnXlXEravmbhWeIj5TsCElc5FPE66OvmiixFU6l27Z5P8gopjokxll7e1B +ujeJOXgy5h+K76xdeQ90JmQX4OO0K5rLXvNH3ufuhGr2NObrBz6kbF5Wdr3urPl6 +pFSLH02zPLqPHhhUvO8jcbUD3RrS/5ZGHqE++F+QRMuYeCXTjECA8iLDvQsiqvT6 +qK1y04V/8K0BYJd/yE31H3cvRLUu7mRAkN87lY1Aj0i3dKM/l2RAa3tsy2/kSDH3 +VeUaqjoPN8PTfJaoMZz7xV7C+Zha+JZh3E7pq6viMR6bkwIDAQABo3sweTAJBgNV +HRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZp +Y2F0ZTAdBgNVHQ4EFgQUbw3OWXLJpkDMpGnLWM4vxSbwUSAwHwYDVR0jBBgwFoAU +B0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAL+OC9x0P7Ql+ +8NbONrIeOIoJD++K5rUM0vI+u9RDAxTm9TO6cP7Cl6H4zzvlzJ3w9DL66c2r+ZTy +BxzFO1wtDKUo5RJKneC0tMz0rJQIWTqo45fDLs8UIDB5t4xp6zed34nvct+wIRaV +hCjHBaVmILlBWb6OF9/kl1JhLtElyDs= +-----END CERTIFICATE----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/server.pem b/src/mongo/gotools/test/legacy26/jstests/libs/server.pem new file mode 100644 index 00000000000..e5980d4856e --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/server.pem @@ -0,0 +1,34 @@ +-----BEGIN PRIVATE KEY----- 
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAK53miP9GczBWXnq +NxHwQkgVqsDuesjwJbWilMK4gf3fjnf2PN3qDpnGbZbPD0ij8975pIKtSPoDycFm +A8Mogip0yU2Lv2lL56CWthSBftOFDL2CWIsmuuURFXZPiVLtLytfI9oLASZFlywW +Cs83qEDTvdW8VoVhVsxV1JFDnpXLAgMBAAECgYBoGBgxrMt97UazhNkCrPT/CV5t +6lv8E7yMGMrlOyzkCkR4ssQyK3o2qbutJTGbR6czvIM5LKbD9Qqlh3ZrNHokWmTR +VQQpJxt8HwP5boQvwRHg9+KSGr4JvRko1qxFs9C7Bzjt4r9VxdjhwZPdy0McGI/z +yPXyQHjqBayrHV1EwQJBANorfCKeIxLhH3LAeUZuRS8ACldJ2N1kL6Ov43/v+0S/ +OprQeBTODuTds3sv7FCT1aYDTOe6JLNOwN2i4YVOMBsCQQDMuCozrwqftD17D06P +9+lRXUekY5kFBs5j28Xnl8t8jnuxsXtQUTru660LD0QrmDNSauhpEmlpJknicnGt +hmwRAkEA12MI6bBPlir0/jgxQqxI1w7mJqj8Vg27zpEuO7dzzLoyJHddpcSNBbwu +npaAakiZK42klj26T9+XHvjYRuAbMwJBAJ5WnwWEkGH/pUHGEAyYQdSVojDKe/MA +Vae0tzguFswK5C8GyArSGRPsItYYA7D4MlG/sGx8Oh2C6MiFndkJzBECQDcP1y4r +Qsek151t1zArLKH4gG5dQAeZ0Lc2VeC4nLMUqVwrHcZDdd1RzLlSaH3j1MekFVfT +6v6rrcNLEVbeuk4= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIC7jCCAlegAwIBAgIBCjANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx +ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD +VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1 +dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNTEz +MjU0MFoXDTQxMDQyMTEzMjU0MFowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l +dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP +MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBAK53miP9GczBWXnqNxHwQkgVqsDuesjwJbWilMK4gf3fjnf2 +PN3qDpnGbZbPD0ij8975pIKtSPoDycFmA8Mogip0yU2Lv2lL56CWthSBftOFDL2C +WIsmuuURFXZPiVLtLytfI9oLASZFlywWCs83qEDTvdW8VoVhVsxV1JFDnpXLAgMB +AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh +dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBQgCkKiZhUV9/Zo7RwYYwm2cNK6tzAf +BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB +gQCbsfr+Q4pty4Fy38lSxoCgnbB4pX6+Ex3xyw5zxDYR3xUlb/uHBiNZ1dBrXBxU +ekU8dEvf+hx4iRDSW/C5N6BGnBBhCHcrPabo2bEEWKVsbUC3xchTB5rNGkvnMt9t +G9ol7vanuzjL3S8/2PB33OshkBH570CxqqPflQbdjwt9dg== +-----END CERTIFICATE----- 
// Helper for "slow weekly" test suites: starts a dedicated empty mongod on a
// fixed port and records the start time so stop() can report total runtime.
SlowWeeklyMongod = function( name ) {
    this.name = name;
    // Fixed port for slow-weekly runs; tests using this helper are assumed
    // not to run concurrently with each other.
    this.port = 30201;

    this.start = new Date();

    this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
};

// Return a DB handle on the managed mongod.
SlowWeeklyMongod.prototype.getDB = function( name ) {
    return this.conn.getDB( name );
};

// Stop the managed mongod and print the elapsed wall-clock time in seconds.
SlowWeeklyMongod.prototype.stop = function(){
    stopMongod( this.port );
    var end = new Date();
    // Fixed typo in the user-facing message: "succesfully" -> "successfully".
    print( "slowWeekly test: " + this.name + " completed successfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
};
+3Ki9sDtJP5f+x0ARaebOfkscOJ5YvDejIxVNVBi5PYRtfCyLT78AKpRfxtBDQtW1 +w02xqkh/RR/GZm8hLyh/KzroTA3+GQvMqnE1irkJCKEOWwUjZNAFt+kgZIQWCfbn +V1CjeK9xnEt00Icn7sh1CKubvakCgYEA4QwKZ2zj10i90NqlAAJlj6NTK/h+bHHw +6VkUUO93GJZ1cC++dVZRhPTqBRdACJSey4nCMFdO3PLwy2gBG9LwU4rcN0Euo2bm +J2uBBJVoXySE1250vem9I7KAramtTzQuHtIEvYhB3DHY+oYv4Eg6NSB4zAdtDKiV +iiP23IN0+9sCgYA0KHconQRab+EEWtIVx0GxxE2LOH9Q9dR3rIWa2tossxqUqX/0 +Y9OjSkhN5dbEEVAC1rP05q6Lq2Hga0+qE5YlMGD0eGxJons7pci5OXo33VgY0h6B +uzM2bPHqrlkMkqYfEQSZLM4PnfNSoAwiF6Anknrvo91fQ3zwUOqE4CAqsQKBgGX2 +a5xShKRcy8ud1JY9f8BlkmBgtP7zXOCMwJyu8nnMaacLqrJFCqg/wuvNjfCVTaEQ +aFA4rn2DAMBX/fCaUNK5Hm9WdAgKrgp8Nbda7i/1Ps7Qt8n35f8PeCe2sdQp4x+J +riYlXxmh6BoRxA1NDDpX3QMr9id/FknBY66jTNRzAoGBALab2GqBYInkmPj1nGDA +f9+VQWFzl98k0PbLQcvKgbWuxLDf/Pz9lBi9tPzhNuTRt9RLuCMc5ZbpPbHPNWI0 +6+zofHTHoW0+prDdtZqpEE/TKmr8emjYMf4CBIKwW3CwbBRLr9C8G01ClTaan2Ge +LMUhIseBsaQhmkL8n1AyauGL +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDlzCCAn+gAwIBAgIJAJDxQ4ilLvoVMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV +BAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAM +BgNVBAoMBTEwZ2VuMR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTAeFw0x +MjEyMDQxNTA0MDJaFw0xODA1MjcxNTA0MDJaMGIxCzAJBgNVBAYTAlVTMREwDwYD +VQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAMBgNVBAoMBTEwZ2Vu +MR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMtJTTjEAvt5n9gnd64wM/9Srv5rrnMQiTvDMPYrke2i +xt3KyfruXw7NWUXb8hwHT/ZIu4cfILv7im87b6d22+LqGv3MXhLwHPQif9RRv5bO +EUe5l3RguXZquGdgygNSTIuF5WoRwNsQKCJrqLlI2kB9/Z5TLb0VUbCxpSXv6IQe +pjFL0hubDUJ5BgsQ4bKGRMj0GN87/cfBDJYWY4mn8gAlQVEJdOCW5owem1qidfka +ZErrjCUXxF6767JBF1YzRwr1vcDFsFYADJNItC2j49bUntBD3kqf5onycfUmPzSJ +QCPHbIAZyGXXB9Id/nIACjg4k1eApStAcix2h+zD8pMCAwEAAaNQME4wHQYDVR0O +BBYEFO6qoBUb1CN4lCkGhaatcjUBKwWmMB8GA1UdIwQYMBaAFO6qoBUb1CN4lCkG +haatcjUBKwWmMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAGcJdsiI +JdhJDPkZksOhHZUMMRHLHfWubMGAvuml6hs+SL850DRc+vRP43eF/yz+WbEydkFz +3qXkQQSG8A2bLOtg0c6Gyi5snUOX0CKcOl3jitgwVkHcdX/v6vbiwALk+r8kJExv 
+vpiWIp3nxgLtYVJP/XPoEomEwmu5zWaw28MWXM4XrEjPYmK5ZL16VXXD+lfO0cnT +2vjkbNK8g7fKaIYYX+cr8GLZi19kO+jUYfhtxQbn8nxUfSjHseAy9BbOLUbGTdAV +MbGRQveOnFW0eDLjiZffwqCtn91EtYy+vBuYHT/C7Ws4hNwd9lTvmg0SHAm01vi1 +b4fBFFjNvg1wCrU= +-----END CERTIFICATE----- diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js b/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js new file mode 100644 index 00000000000..91f50aaa362 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js @@ -0,0 +1,340 @@ +// +// Utilities related to background operations while other operations are working +// + +/** + * Allows synchronization between background ops and the test operations + */ +var waitForLock = function( mongo, name ){ + + var ts = new ObjectId() + var lockColl = mongo.getCollection( "config.testLocks" ) + + lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true) + + // + // Wait until we can set the state to 1 with our id + // + + var startTime = new Date().getTime() + + assert.soon( function() { + lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } }) + var gleObj = lockColl.getDB().getLastErrorObj() + + if( new Date().getTime() - startTime > 20 * 1000 ){ + print( "Waiting for..." 
/**
 * Allows a test or background op to say it's finished.
 * Marks completion by upserting a marker doc into config.testFinished;
 * passing finished === false removes the marker instead.
 *
 * @param mongo    connection to the config server / mongos
 * @param name     unique name of the op (used as _id of the marker doc)
 * @param finished omitted/true to mark finished, false to clear the marker
 */
var setFinished = function( mongo, name, finished ){
    if( finished || finished == undefined )
        mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
    else
        mongo.getCollection( "config.testFinished" ).remove({ _id : name })
}

/**
 * Checks whether a test or background op is finished, i.e. whether a marker
 * doc for 'name' exists in config.testFinished.
 */
var isFinished = function( mongo, name ){
    return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
}

/**
 * Sets the result of a background op by upserting { result, err } keyed by
 * the op name into config.testResult.
 */
var setResult = function( mongo, name, result, err ){
    mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
}

/**
 * Gets the result doc for a background op (null if none recorded yet).
 */
var getResult = function( mongo, name ){
    return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
}
) + waitProgram( x ); + jsTestLog( "Shell " + x + " finished." ) + }; +} + +startParallelOps = function( mongo, proc, args, context ){ + + var procName = proc.name + "-" + new ObjectId() + var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") ) + .getTimestamp().getTime() + + // Make sure we aren't finished before we start + setFinished( mongo, procName, false ) + setResult( mongo, procName, undefined, undefined ) + + // TODO: Make this a context of its own + var procContext = { procName : procName, + seed : seed, + waitForLock : waitForLock, + setFinished : setFinished, + isFinished : isFinished, + setResult : setResult, + + setup : function( context, stored ){ + + waitForLock = function(){ + return context.waitForLock( db.getMongo(), context.procName ) + } + setFinished = function( finished ){ + return context.setFinished( db.getMongo(), context.procName, finished ) + } + isFinished = function(){ + return context.isFinished( db.getMongo(), context.procName ) + } + setResult = function( result, err ){ + return context.setResult( db.getMongo(), context.procName, result, err ) + } + }} + + var bootstrapper = function( stored ){ + + var procContext = stored.procContext + procContext.setup( procContext, stored ) + + var contexts = stored.contexts + eval( "contexts = " + contexts ) + + for( var i = 0; i < contexts.length; i++ ){ + if( typeof( contexts[i] ) != "undefined" ){ + // Evaluate all contexts + contexts[i]( procContext ) + } + } + + var operation = stored.operation + eval( "operation = " + operation ) + + var args = stored.args + eval( "args = " + args ) + + result = undefined + err = undefined + + try{ + result = operation.apply( null, args ) + } + catch( e ){ + err = e + } + + setResult( result, err ) + } + + var contexts = [ RandomFunctionContext, context ] + + var testDataColl = mongo.getCollection( "config.parallelTest" ) + + testDataColl.insert({ _id : procName, + bootstrapper : tojson( bootstrapper ), + operation : tojson( proc 
), + args : tojson( args ), + procContext : procContext, + contexts : tojson( contexts ) }) + + assert.eq( null, testDataColl.getDB().getLastError() ) + + var bootstrapStartup = + "{ var procName = '" + procName + "'; " + + "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" + + ".findOne({ _id : procName }); " + + "var bootstrapper = stored.bootstrapper; " + + "eval( 'bootstrapper = ' + bootstrapper ); " + + "bootstrapper( stored ); " + + "}" + + + var oldDB = db + db = mongo.getDB( "test" ) + + jsTest.log( "Starting " + proc.name + " operations..." ) + + var rawJoin = startParallelShell( bootstrapStartup ) + + db = oldDB + + + var join = function(){ + setFinished( mongo, procName, true ) + + rawJoin(); + result = getResult( mongo, procName ) + + assert.neq( result, null ) + + if( result.err ) throw "Error in parallel ops " + procName + " : " + + tojson( result.err ) + + else return result.result + } + + join.isFinished = function(){ + return isFinished( mongo, procName ) + } + + join.setFinished = function( finished ){ + return setFinished( mongo, procName, finished ) + } + + join.waitForLock = function( name ){ + return waitForLock( mongo, name ) + } + + return join +} + +var RandomFunctionContext = function( context ){ + + Random.srand( context.seed ); + + Random.randBool = function(){ return Random.rand() > 0.5 } + + Random.randInt = function( min, max ){ + + if( max == undefined ){ + max = min + min = 0 + } + + return min + Math.floor( Random.rand() * max ) + } + + Random.randShardKey = function(){ + + var numFields = 2 //Random.randInt(1, 3) + + var key = {} + for( var i = 0; i < numFields; i++ ){ + var field = String.fromCharCode( "a".charCodeAt() + i ) + key[ field ] = 1 + } + + return key + } + + Random.randShardKeyValue = function( shardKey ){ + + var keyValue = {} + for( field in shardKey ){ + keyValue[ field ] = Random.randInt(1, 100) + } + + return keyValue + } + + Random.randCluster = function(){ + + var numShards = 2 
// Background op: repeatedly issues moveChunk commands for random shard-key
// values to random shards on the sharded collection 'collName', once per
// second, until isFinished() reports the test is done. moveChunk failures
// are expected (e.g. chunk already on target) and are just printed.
function moveOps( collName, options ){

    options = options || {}

    var adminDB = db.getMongo().getDB( "admin" )
    var configDB = db.getMongo().getDB( "config" )
    var shardDocs = configDB.shards.find().toArray()
    var keyPattern = configDB.collections.findOne({ _id : collName }).key

    while( ! isFinished() ){

        var targetKey = Random.randShardKeyValue( keyPattern )
        var targetShard = shardDocs[ Random.randInt( shardDocs.length ) ]._id

        try {
            var moveRes = adminDB.runCommand({ moveChunk : collName,
                                               find : targetKey,
                                               to : targetShard })
            printjson( moveRes )
        }
        catch( e ){
            printjson( e )
        }

        sleep( 1000 )
    }

    jsTest.log( "Stopping moveOps..." )
}
// On error inserting documents, traces back and shows where the document was
// dropped: queries every shard's oplog for insert/update/delete entries that
// touch 'doc' (by shard key, falling back to _id) and prints them in ts order.
// Returns the sorted array of matching oplog entries (undefined if doc has no _id).
function traceMissingDoc( coll, doc, mongos ) {

    // Accept either (collection, doc) or (namespace-ish coll, doc, mongos).
    if (mongos) coll = mongos.getCollection(coll + "");
    else mongos = coll.getMongo();

    var config = mongos.getDB( "config" );
    var shards = config.shards.find().toArray();
    // Open a direct connection to each shard so we can read its local oplog.
    for ( var i = 0; i < shards.length; i++ ) {
        shards[i].conn = new Mongo( shards[i].host );
    }

    var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;

    // Project out the shard key
    var shardKey = {};
    for ( var k in shardKeyPatt ) {
        if ( doc[k] == undefined ) {
            // Doc doesn't carry the full shard key; search by _id instead.
            jsTest.log( "Shard key " + tojson( shardKey ) +
                        " not found in doc " + tojson( doc ) +
                        ", falling back to _id search..." );
            shardKeyPatt = { _id : 1 };
            shardKey = { _id : doc['_id'] };
            break;
        }
        shardKey[k] = doc[k];
    }

    if ( doc['_id'] == undefined ) {
        jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
        return;
    }

    jsTest.log( "Using shard key : " + tojson( shardKey ) );

    var allOps = [];
    for ( var i = 0; i < shards.length; i++ ) {

        // Replica-set oplog first; fall back to master-slave oplog.
        var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
        if ( !oplog.findOne() ) {
            oplog = shards[i].conn.getCollection( "local.oplog.$main" );
        }

        if ( !oplog.findOne() ) {
            jsTest.log( "No oplog was found on shard " + shards[i]._id );
            continue;
        }

        // Adds the shard-key fields to 'query', prefixed with the oplog field
        // holding the doc ('o' for inserts, 'o2' for update criteria).
        var addKeyQuery = function( query, prefix ) {
            for ( var k in shardKey ) {
                query[prefix + '.' + k] = shardKey[k];
            }
            return query;
        };

        // Tags each matching oplog entry with its shard and a human-readable
        // time (oplog ts is in seconds; Date wants milliseconds).
        var addToOps = function( cursor ) {
            cursor.forEach( function( doc ) {
                doc.shard = shards[i]._id;
                doc.realTime = new Date( doc.ts.getTime() * 1000 );
                allOps.push( doc );
            });
        };

        // Find ops
        addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
        var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
                                    { op : 'u', 'o2._id' : doc['_id'] } ] };
        addToOps( oplog.find( updateQuery ) );
        addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
    }

    // Order all entries across shards by oplog timestamp.
    var compareOps = function( opA, opB ) {
        if ( opA.ts < opB.ts ) return -1;
        if ( opB.ts < opA.ts ) return 1;
        else return 0;
    }

    allOps.sort( compareOps );

    print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
    for ( var i = 0; i < allOps.length; i++ ) {
        printjson( allOps[i] );
    }

    return allOps;
}
// use_extended_timeout.js: wraps the shell's runMongoProgram so that every
// tool invocation gets an extended --dialTimeout, unless the caller already
// supplied one. bsondump is skipped because it takes no connection options.
var _orig_runMongoProgram = runMongoProgram;
runMongoProgram = function() {
    // Copy the 'arguments' object into a real array. The original used
    // `for (var i in arguments)`, which enumerates property names as strings
    // and is fragile against inherited/extra properties; slice is the
    // idiomatic, order-guaranteed conversion.
    var args = Array.prototype.slice.call(arguments);
    var progName = args[0];
    if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
        args.push("--dialTimeout", "30");
    }
    return _orig_runMongoProgram.apply(null, args);
};
// Blocks (via wait) until a usable connection to 'a' is re-established.
// 'a' may be either a DB handle or a connection; rebinds the global 'db'
// as a side effect and probes liveness with a stats() call.
var reconnect = function(a) {
    wait(function() {
        try {
            // make this work with either dbs or connections
            if (typeof(a.getDB) == "function") {
                db = a.getDB('foo');
            }
            else {
                db = a;
            }
            // Any command will do; stats() throws if the connection is down.
            db.bar.stats();
            if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
                return jsTest.authenticate(db.getMongo());
            }
            return true;
        } catch(e) {
            // Not reconnected yet; wait() will retry.
            print(e);
            return false;
        }
    });
};
// csv1.js
// Round-trips a document through mongoexport/mongoimport in CSV mode and
// verifies field handling (quoted commas, spaces in field names, '-' and '.'
// values), both with an explicit field list and with --headerline.

t = new ToolTest( "csv1" )

c = t.startDB( "foo" );

// Deliberately awkward values: embedded commas/quotes, a field name with a
// space, and bare '-' / '.' strings.
base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};

assert.eq( 0 , c.count() , "setup1" );
c.insert( base );
delete base._id
assert.eq( 1 , c.count() , "setup2" );

// Export with an explicit field list.
t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )

c.drop()
assert.eq( 0 , c.count() , "after drop" )

// Import with the same field list: the header row becomes a data document,
// so we expect 2 docs back.
t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
assert.soon( "2 == c.count()" , "restore 2" );

a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id
// a[1] is the imported header row; a[0] is the original data row.
assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )

c.drop()
assert.eq( 0 , c.count() , "after drop 2" )

// Import again using --headerline: header row is consumed, 1 doc expected.
t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );

x = c.findOne()
delete x._id;
assert.eq( tojson( base ) , tojson(x) , "csv parse 2" )




t.stop()
"foo" ); + +assert.eq( 0 , c.count() , "setup1" ); + +objId = ObjectId() + +c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'}) +c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)}) +c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"), + c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i, + e : function foo() { print("Hello World!"); }}) + +assert.eq( 3 , c.count() , "setup2" ); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e") + + +c.drop() + +assert.eq( 0 , c.count() , "after drop" ) + +t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline"); + +assert.soon ( 3 + " == c.count()", "after import"); + +// Note: Exporting and Importing to/from CSV is not designed to be round-trippable +expected = [] +expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"}) +expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3}) +// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while +// they are stored as seconds. See SERVER-7718. 
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z", + c : { "$timestamp" : { "t" : 1234, "i" : 9876 } }, + d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })}) + +actual = [] +actual.push(c.find({a : 1}).toArray()[0]); +actual.push(c.find({a : -2.0}).toArray()[0]); +actual.push(c.find({a : "D76DF8"}).toArray()[0]); + +for (i = 0; i < expected.length; i++) { + delete actual[i]._id + assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length) + keys = Object.keys(expected[i]) + for(var j=0;j<keys.length;j++){ + expectedVal = expected[i][keys[j]] + if((typeof expectedVal)== "object"){ + // For fields which contain arrays or objects, they have been + // exported as JSON - parse the JSON in the output and verify + // that it matches the original document's value + assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i) + }else{ + // Otherwise just compare the values directly + assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i) + } + } +} + + +t.stop() diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js new file mode 100644 index 00000000000..3e0dd2c6829 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js @@ -0,0 +1,31 @@ +// csvexport2.js + +t = new ToolTest( "csvexport2" ) + +c = t.startDB( "foo" ); + +// This test is designed to test exporting of a CodeWithScope object. +// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell, +// therefore this test does not work. 
Once SERVER-3391 is resolved, this test should be un-commented out + +//assert.eq( 0 , c.count() , "setup1" ); + +//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})}) +//assert.eq( 1 , c.count() , "setup2" ); +//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b") + + +//c.drop() + +//assert.eq( 0 , c.count() , "after drop" ) +//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline"); +//assert.soon ( 1 + " == c.count()", "after import"); + +//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"}; +//actual = c.findOne() + +//delete actual._id; +//assert.eq( expected, actual ); + + +t.stop()
\ No newline at end of file diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js b/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js new file mode 100644 index 00000000000..3bff1110cbe --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js @@ -0,0 +1,40 @@ +// csvimport1.js + +t = new ToolTest( "csvimport1" ) + +c = t.startDB( "foo" ); + +base = [] +base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" }) +base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" }) +base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" }) +base.push({a : 4, b : "", "c" : "How are empty entries handled?" }) +base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""}) +base.push({ a : "a" , b : "b" , c : "c"}) + +assert.eq( 0 , c.count() , "setup" ); + +t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" ); +assert.soon( base.length + " == c.count()" , "after import 1 " ); + +a = c.find().sort( { a : 1 } ).toArray(); +for (i = 0; i < base.length; i++ ) { + delete a[i]._id + assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i) +} + +c.drop() +assert.eq( 0 , c.count() , "after drop" ) + +t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" ) +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( base.length - 1 , c.count() , "after import 2" ); + +x = c.find().sort( { a : 1 } ).toArray(); +for (i = 0; i < base.length - 1; i++ ) { + delete x[i]._id + assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i) +} + + +t.stop() diff --git 
a/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv b/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv new file mode 100644 index 00000000000..1e094179a63 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv @@ -0,0 +1,2 @@ +a b c d e + 1 foobar 5 -6 diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv new file mode 100644 index 00000000000..256d40a9184 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv @@ -0,0 +1,8 @@ +a,b,c +1,"this is some text. +This text spans multiple lines, and just for fun +contains a comma", "This has leading and trailing whitespace!" +2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes + 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "" + "4" ,, How are empty entries handled? +"5","""""", """This string is in quotes and contains empty quotes ("""")""" diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson Binary files differnew file mode 100644 index 00000000000..b8f8f99e6bf --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson Binary files differnew file mode 100644 index 00000000000..dde25da302a --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js new file mode 100644 index 00000000000..2a2d613b708 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js @@ 
-0,0 +1,27 @@ +// dumpauth.js +// test mongodump with authentication +port = allocatePorts( 1 )[ 0 ]; +baseName = "tool_dumpauth"; + +m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" ); +db = m.getDB( "admin" ); + +t = db[ baseName ]; +t.drop(); + +for(var i = 0; i < 100; i++) { + t["testcol"].save({ "x": i }); +} + +db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles}); + +assert( db.auth( "testuser" , "testuser" ) , "auth failed" ); + +x = runMongoProgram( "mongodump", + "--db", baseName, + "--authenticationDatabase=admin", + "-u", "testuser", + "-p", "testuser", + "-h", "127.0.0.1:"+port, + "--collection", "testcol" ); +assert.eq(x, 0, "mongodump should succeed with authentication"); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js new file mode 100644 index 00000000000..fbe24551929 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js @@ -0,0 +1,14 @@ +//dumpfilename1.js + +//Test designed to make sure error that dumping a collection with "/" fails + +t = new ToolTest( "dumpfilename1" ); + +t.startDB( "foo" ); + +c = t.db; +c.getCollection("df/").insert({ a: 3 }) +assert(c.getCollection("df/").count() > 0) // check write worked +assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code") +t.stop(); + diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js new file mode 100644 index 00000000000..fd1e8789ea6 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js @@ -0,0 +1,23 @@ +// dumprestore1.js + +t = new ToolTest( "dumprestore1" ); + +c = t.startDB( "foo" ); +assert.eq( 0 , c.count() , "setup1" ); +c.save( { a : 22 } ); +assert.eq( 1 , c.count() , "setup2" ); + +t.runTool( "dump" , "--out" 
, t.ext ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" ); + +t.runTool( "restore" , "--dir" , t.ext ); +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore 2" ); +assert.eq( 22 , c.findOne().a , "after restore 2" ); + +// ensure that --collection is used with --db. See SERVER-7721 +var ret = t.runTool( "dump" , "--collection" , "col" ); +assert.neq( ret, 0, "mongodump should return failure code" ); +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js new file mode 100644 index 00000000000..49f008ea591 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js @@ -0,0 +1,63 @@ +// simple test to ensure write concern functions as expected + +var name = "dumprestore10"; + +function step(msg) { + msg = msg || ""; + this.x = (this.x || 0) + 1; + print('\n' + name + ".js step " + this.x + ' ' + msg); +} + +step(); + +var replTest = new ReplSetTest( {name: name, nodes: 2} ); +var nodes = replTest.startSet(); +replTest.initiate(); +var master = replTest.getPrimary(); +var total = 1000; + +{ + step("store data"); + var foo = master.getDB("foo"); + for (i = 0; i < total; i++) { + foo.bar.insert({ x: i, y: "abc" }); + } +} + +{ + step("wait"); + replTest.awaitReplication(); +} + +step("mongodump from replset"); + +var data = MongoRunner.dataDir + "/dumprestore10-dump1/"; + +runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data ); + + +{ + step("remove data after dumping"); + master.getDB("foo").getCollection("bar").drop(); +} + +{ + step("wait"); + replTest.awaitReplication(); +} + +step("try mongorestore with write concern"); + +runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data ); + +var x = 0; + +// no waiting for replication +x = master.getDB("foo").getCollection("bar").count(); + +assert.eq(x, total, 
"mongorestore should have successfully restored the collection"); + +step("stopSet"); +replTest.stopSet(); + +step("SUCCESS"); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js new file mode 100644 index 00000000000..f1e5941cbd0 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js @@ -0,0 +1,60 @@ +// dumprestore3.js + +var name = "dumprestore3"; + +function step(msg) { + msg = msg || ""; + this.x = (this.x || 0) + 1; + print('\n' + name + ".js step " + this.x + ' ' + msg); +} + +step(); + +var replTest = new ReplSetTest( {name: name, nodes: 2} ); +var nodes = replTest.startSet(); +replTest.initiate(); +var master = replTest.getPrimary(); + +{ + step("populate master"); + var foo = master.getDB("foo"); + for (i = 0; i < 20; i++) { + foo.bar.insert({ x: i, y: "abc" }); + } +} + +{ + step("wait for slaves"); + replTest.awaitReplication(); +} + +{ + step("dump & restore a db into a slave"); + var port = 30020; + var conn = startMongodTest(port, name + "-other"); + var c = conn.getDB("foo").bar; + c.save({ a: 22 }); + assert.eq(1, c.count(), "setup2"); +} + +step("try mongorestore to slave"); + +var data = MongoRunner.dataDir + "/dumprestore3-other1/"; +resetDbpath(data); +runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data ); + +var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data ); +assert.eq(x, 1, "mongorestore should exit w/ -1 on slave"); + +step("try mongoimport to slave"); + +dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json"; +runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" ); + +x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile ); +assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed + +step("stopSet"); +replTest.stopSet(); 
+ +step("SUCCESS"); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js new file mode 100644 index 00000000000..568e196061f --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js @@ -0,0 +1,42 @@ +// dumprestore4.js -- see SERVER-2186 + +// The point of this test is to ensure that mongorestore successfully +// constructs indexes when the database being restored into has a +// different name than the database dumped from. There are 2 +// issues here: (1) if you dumped from database "A" and restore into +// database "B", B should have exactly the right indexes; (2) if for +// some reason you have another database called "A" at the time of the +// restore, mongorestore shouldn't touch it. + +t = new ToolTest( "dumprestore4" ); + +c = t.startDB( "dumprestore4" ); + +db=t.db + +dbname = db.getName(); +dbname2 = "NOT_"+dbname; + +db2=db.getSisterDB( dbname2 ); + +db.dropDatabase(); // make sure it's empty +db2.dropDatabase(); // make sure everybody's empty + +assert.eq( 0 , db.system.indexes.count() , "setup1" ); +c.ensureIndex({ x : 1} ); +assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1 + +assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump") + +// to ensure issue (2), we have to clear out the first db. +// By inspection, db.dropIndexes() doesn't get rid of the _id index on c, +// so we have to drop the collection. 
+c.drop(); +assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" ); + +// issue (1) +assert.eq( 2 , db2.system.indexes.count() , "after restore 1" ); +// issue (2) +assert.eq( 0 , db.system.indexes.count() , "after restore 2" ); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js new file mode 100644 index 00000000000..d8b349e9589 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js @@ -0,0 +1,27 @@ +// dumprestore6.js +// Test restoring from a dump with an old index version + +t = new ToolTest( "dumprestore6" ); + +c = t.startDB( "foo" ); +db = t.db +assert.eq( 0 , c.count() , "setup1" ); + +t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6") + +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore" ); +assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated") +assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection") + +db.dropDatabase() +assert.eq( 0 , c.count() , "after drop" ); + +t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion") + +assert.soon( "c.findOne()" , "no data after sleep2" ); +assert.eq( 1 , c.count() , "after restore2" ); +assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained") +assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection") + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js new file mode 100644 index 00000000000..a71725f434b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js @@ -0,0 +1,66 @@ +var name = "dumprestore7"; + + +function step(msg) { + msg = msg || ""; + 
this.x = (this.x || 0) + 1; + print('\n' + name + ".js step " + this.x + ' ' + msg); +} + +step(); + +var replTest = new ReplSetTest( {name: name, nodes: 1} ); +var nodes = replTest.startSet(); +replTest.initiate(); +var master = replTest.getPrimary(); + +{ + step("first chunk of data"); + var foo = master.getDB("foo"); + for (i = 0; i < 20; i++) { + foo.bar.insert({ x: i, y: "abc" }); + } +} + +{ + step("wait"); + replTest.awaitReplication(); + var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next(); + step(time.ts.t); +} + +{ + step("second chunk of data"); + var foo = master.getDB("foo"); + for (i = 30; i < 50; i++) { + foo.bar.insert({ x: i, y: "abc" }); + } +} +{ + var port = 30020; + var conn = startMongodTest(port, name + "-other"); +} + +step("try mongodump with $timestamp"); + +var data = MongoRunner.dataDir + "/dumprestore7-dump1/"; +var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}"; + +MongoRunner.runMongoTool( "mongodump", + { "host": "127.0.0.1:"+replTest.ports[0], + "db": "local", "collection": "oplog.rs", + "query": query, "out": data }); + +step("try mongorestore from $timestamp"); + +runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data ); +var x = 9; +x = conn.getDB("local").getCollection("oplog.rs").count(); + +assert.eq(x, 20, "mongorestore should only have the latter 20 entries"); + +step("stopSet"); +replTest.stopSet(); + +step("SUCCESS"); + diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js new file mode 100644 index 00000000000..4e6591738d6 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js @@ -0,0 +1,105 @@ +// dumprestore8.js + +// This file tests that indexes and capped collection options get properly dumped and restored. 
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection + +t = new ToolTest( "dumprestore8" ); + +t.startDB( "foo" ); +db = t.db; + +dbname = db.getName(); +dbname2 = "NOT_"+dbname; + +db.dropDatabase(); + +assert.eq( 0 , db.foo.count() , "setup1" ); +db.foo.save( { a : 1, b : 1 } ); +db.foo.ensureIndex({a:1}); +db.foo.ensureIndex({b:1, _id:-1}); +assert.eq( 1 , db.foo.count() , "setup2" ); + + +assert.eq( 0 , db.bar.count() , "setup3" ); +db.createCollection("bar", {capped:true, size:1000}); + +for (var i = 0; i < 1000; i++) { + db.bar.save( { x : i } ); +} +db.bar.ensureIndex({x:1}); + +barDocCount = db.bar.count(); +assert.gt( barDocCount, 0 , "No documents inserted" ); +assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" ); +assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" ); + + +// Full dump/restore + +t.runTool( "dump" , "--out" , t.ext ); + +db.dropDatabase(); +assert.eq( 0 , db.foo.count() , "foo not dropped" ); +assert.eq( 0 , db.bar.count() , "bar not dropped" ); +assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" ); + +t.runTool( "restore" , "--dir" , t.ext ); + +assert.soon( "db.foo.findOne()" , "no data after sleep" ); +assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" ); +assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" ); +for (var i = 0; i < 10; i++) { + db.bar.save({x:i}); +} +assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." 
); +assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" ); + + +// Dump/restore single DB + +dumppath = t.ext + "singledbdump/"; +mkdir(dumppath); +t.runTool( "dump" , "-d", dbname, "--out" , dumppath ); + +db.dropDatabase(); +assert.eq( 0 , db.foo.count() , "foo not dropped2" ); +assert.eq( 0 , db.bar.count() , "bar not dropped2" ); +assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" ); + +t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname ); + +db = db.getSiblingDB(dbname2); + +assert.soon( "db.foo.findOne()" , "no data after sleep 2" ); +assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" ); +assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" ); +for (var i = 0; i < 10; i++) { + db.bar.save({x:i}); +} +assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." ); +assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" ); + + +// Dump/restore single collection + +dumppath = t.ext + "singlecolldump/"; +mkdir(dumppath); +t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath ); + +db.dropDatabase(); +assert.eq( 0 , db.bar.count() , "bar not dropped3" ); +assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" ); + +t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" ); + +db = db.getSiblingDB(dbname); + +assert.soon( "db.baz.findOne()" , "no data after sleep 2" ); +assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" ); +for (var i = 0; i < 10; i++) { + db.baz.save({x:i}); +} +assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." 
); +assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" ); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js new file mode 100644 index 00000000000..4bbb2fc18b1 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js @@ -0,0 +1,79 @@ +if (0) { // Test disabled until SERVER-3853 is finished. +var name = "dumprestore9"; +function step(msg) { + msg = msg || ""; + this.x = (this.x || 0) + 1; + print('\n' + name + ".js step " + this.x + ' ' + msg); +} + +s = new ShardingTest( "dumprestore9a", 2, 0, 3, {chunksize:1} ); + +step("Shard collection"); + +s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first +s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } ); + +db = s.getDB( "aaa" ); +coll = db.foo; + +step("insert data"); + +str = 'a'; +while (str.length < 1024*512) { + str += str; +} + +numDocs = 20; +for (var i = 0; i < numDocs; i++) { + coll.insert({x:i, str:str}); +} + +step("Wait for balancing"); + +assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 ); + +assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly"); + +step("dump cluster"); + +dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/"; +resetDbpath(dumpdir); +runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir ); + +step("Shutting down cluster"); + +s.stop(); + +step("Starting up clean cluster"); +s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} ); + +db = s.getDB( "aaa" ); +coll = db.foo; + +assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test"); + +step("Restore data and config"); + +runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", 
"--forceConfigRestore"); + +config = s.getDB("config"); +assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly"); + +assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly"); + +assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2"); +assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly"); + +for (var i = 0; i < numDocs; i++) { + doc = coll.findOne({x:i}); + assert.eq(i, doc.x, "Doc missing from the shard it should be on"); +} + +for (var i = 0; i < s._connections.length; i++) { + assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host); +} + +step("Stop cluster"); +s.stop(); +step("SUCCESS"); +}
\ No newline at end of file diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js new file mode 100644 index 00000000000..d6b87ffe70c --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js @@ -0,0 +1,107 @@ +// SERVER-6366 +// relates to SERVER-808 +// +// This file tests that options are not restored upon +// mongorestore with --noOptionsRestore +// +// It checks that this works both when doing a full +// database dump/restore and when doing it just for a +// single db or collection. + +t = new ToolTest( "dumprestoreWithNoOptions" ); + +t.startDB( "foo" ); +db = t.db; + +dbname = db.getName(); +dbname2 = "NOT_"+dbname; + +db.dropDatabase(); + +var options = { capped: true, size: 4096, autoIndexId: true }; +db.createCollection('capped', options); +assert.eq( 1, db.system.indexes.count(), "auto index not created" ); +var cappedOptions = db.capped.exists().options; +for ( var opt in options ) { + assert.eq(options[opt], cappedOptions[opt], + 'invalid option:' + tojson(options) + " " + tojson(cappedOptions)); +} +db.capped.insert({ x: 1 }); +db.getLastError() + +// Full dump/restore + +t.runTool( "dump" , "--out" , t.ext ); + +db.dropDatabase(); +assert.eq( 0, db.capped.count(), "capped not dropped"); +assert.eq( 0, db.system.indexes.count(), "indexes not dropped" ); + +t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore"); + +assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" ); +assert(true !== db.capped.stats().capped, "restore options were not ignored"); +assert(undefined === db.capped.exists().options, + "restore options not ignored: " + tojson( db.capped.exists() ) ); + +// Dump/restore single DB + +db.dropDatabase(); +var options = { capped: true, size: 4096, autoIndexId: true }; +db.createCollection('capped', options); +assert.eq( 1, db.system.indexes.count(), "auto index not created" 
); +var cappedOptions = db.capped.exists().options; +for ( var opt in options ) { + assert.eq(options[opt], cappedOptions[opt], 'invalid option') +} +db.capped.insert({ x: 1 }); +db.getLastError() + +dumppath = t.ext + "noOptionsSingleDump/"; +mkdir(dumppath); +t.runTool( "dump" , "-d", dbname, "--out" , dumppath ); + +db.dropDatabase(); +assert.eq( 0, db.capped.count(), "capped not dropped"); +assert.eq( 0, db.system.indexes.count(), "indexes not dropped" ); + +t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore"); + +db = db.getSiblingDB(dbname2); + +assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" ); +assert(true !== db.capped.stats().capped, "restore options were not ignored"); +assert(undefined === db.capped.exists().options, "restore options not ignored"); + +// Dump/restore single collection + +db.dropDatabase(); +var options = { capped: true, size: 4096, autoIndexId: true }; +db.createCollection('capped', options); +assert.eq( 1, db.system.indexes.count(), "auto index not created" ); +var cappedOptions = db.capped.exists().options; +for ( var opt in options ) { + assert.eq(options[opt], cappedOptions[opt], 'invalid option') +} +db.capped.insert({ x: 1 }); +db.getLastError() + +dumppath = t.ext + "noOptionsSingleColDump/"; +mkdir(dumppath); +dbname = db.getName(); +t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath ); + +db.dropDatabase(); + +assert.eq( 0, db.capped.count(), "capped not dropped"); +assert.eq( 0, db.system.indexes.count(), "indexes not dropped" ); + +t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname ); + +db = db.getSiblingDB(dbname); + +assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" ); +assert( true !== db.capped.stats().capped, "restore options were not ignored" ); +assert( undefined === db.capped.exists().options ); + +t.stop(); diff --git 
a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js new file mode 100644 index 00000000000..f99b5d0405c --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js @@ -0,0 +1,35 @@ +// dumprestore_auth.js + +t = new ToolTest("dumprestore_auth", { auth : "" }); + +c = t.startDB("foo"); + +adminDB = c.getDB().getSiblingDB('admin'); +adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']}); +adminDB.auth('admin','password'); +adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']}); +adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']}); + +assert.eq(0 , c.count() , "setup1"); +c.save({ a : 22 }); +assert.eq(1 , c.count() , "setup2"); + +assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false})); +assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags); + +t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password"); + +c.drop(); +assert.eq(0 , c.count() , "after drop"); + +t.runTool("restore" , "--dir" , t.ext, "--writeConcern", "0"); // Should fail +assert.eq(0 , c.count() , "after restore without auth"); + +t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0"); +assert.soon("c.findOne()" , "no data after sleep"); +assert.eq(1 , c.count() , "after restore 2"); +assert.eq(22 , c.findOne().a , "after restore 2"); +assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags); +assert.eq(3, adminDB.system.users.count()); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js new file mode 100644 index 00000000000..fd7d9a034d3 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js @@ -0,0 +1,96 @@ +// dumprestore_auth2.js +// 
Tests that mongodump and mongorestore properly handle access control information +// Tests that the default auth roles of backup and restore work properly. + +t = new ToolTest("dumprestore_auth2", {auth: ""}); + +coll = t.startDB("foo"); +admindb = coll.getDB().getSiblingDB("admin") + +// Create the relevant users and roles. +admindb.createUser({user: "root", pwd: "pass", roles: ["root"]}); +admindb.auth("root", "pass"); + +admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]}); +admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]}); + +admindb.createRole({role: "customRole", + privileges:[{resource: {db: "jstests_tool_dumprestore_auth2", + collection: "foo"}, + actions: ["find"]}], + roles:[]}); +admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]}); + +coll.insert({word: "tomato"}); +assert.eq(1, coll.count()); + +assert.eq(4, admindb.system.users.count(), "setup users") +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}), + "setup2: " + tojson( admindb.system.users.getIndexes() ) ); +assert.eq(1, admindb.system.roles.count(), "setup3") +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), "setup4") +assert.eq(1, admindb.system.version.count()); +var versionDoc = admindb.system.version.findOne(); + +// Logout root user. +admindb.logout(); + +// Verify that the custom role works as expected. +admindb.auth("test", "pass"); +assert.eq("tomato", coll.findOne().word); +admindb.logout(); + +// Dump the database. +t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass"); + +// Drop the relevant data in the database. 
+admindb.auth("root", "pass"); +coll.getDB().dropDatabase(); +admindb.dropUser("backup"); +admindb.dropUser("test"); +admindb.dropRole("customRole"); + +assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users"); +assert.eq(0, admindb.system.roles.count(), "didn't drop roles"); +assert.eq(0, coll.count(), "didn't drop foo coll"); + +t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0"); + +assert.soon("admindb.system.users.findOne()", "no data after restore"); +assert.eq(4, admindb.system.users.count(), "didn't restore users"); +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}), + "didn't restore user indexes"); +assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles"); +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), + "didn't restore role indexes"); + +admindb.logout(); + +// Login as user with customRole to verify privileges are restored. 
+admindb.auth("test", "pass"); +assert.eq("tomato", coll.findOne().word); +admindb.logout(); + +admindb.auth("root", "pass"); +admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]}); +admindb.dropRole("customRole"); +admindb.createRole({role: "customRole2", roles: [], privileges:[]}); +admindb.dropUser("root"); +admindb.logout(); + +t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0"); + +admindb.auth("root", "pass"); +assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2"); +assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users"); +assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles"); +assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles"); +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}), + "didn't maintain user indexes"); +assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), + "didn't maintain role indexes"); +assert.eq(1, admindb.system.version.count(), "didn't restore version"); +assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly"); +admindb.logout(); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js new file mode 100644 index 00000000000..b87418ed176 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js @@ -0,0 +1,199 @@ +// dumprestore_auth3.js +// Tests that mongodump and mongorestore properly handle access control information when doing +// single-db dumps and restores + + +// Runs the tool with the given name against the given mongod. 
+function runTool(toolName, mongod, options) { + var opts = {host: mongod.host}; + Object.extend(opts, options); + MongoRunner.runMongoTool(toolName, opts); +} + +var mongod = MongoRunner.runMongod(); +var admindb = mongod.getDB("admin"); +var db = mongod.getDB("foo"); + +jsTestLog("Creating Admin user & initial data"); +admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']}); +admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']}); +admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']}); +admindb.createRole({role: "dummyRole", roles: [], privileges:[]}); +db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles}); +db.createRole({role: 'role', roles: [], privileges:[]}); +var backupActions = ['find']; +db.createRole({role: 'backupFooChester', + privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}], + roles: []}); +db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']}); + +var userCount = db.getUsers().length; +var rolesCount = db.getRoles().length; +var adminUsersCount = admindb.getUsers().length; +var adminRolesCount = admindb.getRoles().length; +var systemUsersCount = admindb.system.users.count(); +var systemVersionCount = admindb.system.version.count(); + +db.bar.insert({a:1}); + +assert.eq(1, db.bar.findOne().a); +assert.eq(userCount, db.getUsers().length, "setup"); +assert.eq(rolesCount, db.getRoles().length, "setup2"); +assert.eq(adminUsersCount, admindb.getUsers().length, "setup3"); +assert.eq(adminRolesCount, admindb.getRoles().length, "setup4"); +assert.eq(systemUsersCount, admindb.system.users.count(), "setup5"); +assert.eq(systemVersionCount, admindb.system.version.count(),"system version"); +assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing"); +assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing"); +var versionDoc = admindb.system.version.findOne(); + +jsTestLog("Dump foo 
database without dumping user data"); +var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3"); +runTool("mongodump", mongod, {out: dumpDir, db: "foo"}); +db = mongod.getDB('foo'); + +db.dropDatabase(); +db.dropAllUsers(); +db.dropAllRoles(); + +jsTestLog("Restore foo database from dump that doesn't contain user data "); +runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"}); + +db = mongod.getDB('foo'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(1, db.bar.findOne().a); +assert.eq(0, db.getUsers().length, "Restore created users somehow"); +assert.eq(0, db.getRoles().length, "Restore created roles somehow"); + +// Re-create user data +db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}); +db.createRole({role: 'role', roles: [], privileges:[]}); +userCount = 1; +rolesCount = 1; + +assert.eq(1, db.bar.findOne().a); +assert.eq(userCount, db.getUsers().length, "didn't create user"); +assert.eq(rolesCount, db.getRoles().length, "didn't create role"); + +jsTestLog("Dump foo database *with* user data"); +runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""}); +db = mongod.getDB('foo'); + +db.dropDatabase(); +db.dropAllUsers(); +db.dropAllRoles(); + +assert.eq(0, db.getUsers().length, "didn't drop users"); +assert.eq(0, db.getRoles().length, "didn't drop roles"); +assert.eq(0, db.bar.count(), "didn't drop 'bar' collection"); + +jsTestLog("Restore foo database without restoring user data, even though it's in the dump"); +runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"}); +db = mongod.getDB('foo'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(1, db.bar.findOne().a); +assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have"); +assert.eq(0, db.getRoles().length, "Restored roles even though it 
shouldn't have"); + +jsTestLog("Restore foo database *with* user data"); +runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"}); +db = mongod.getDB('foo'); +admindb = mongod.getDB('admin'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(1, db.bar.findOne().a); +assert.eq(userCount, db.getUsers().length, "didn't restore users"); +assert.eq(rolesCount, db.getRoles().length, "didn't restore roles"); +assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing"); +assert.docEq(versionDoc, + db.getSiblingDB('admin').system.version.findOne(), + "version doc was changed by restore"); + +jsTestLog("Make modifications to user data that should be overridden by the restore"); +db.dropUser('user') +db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles}); +db.dropRole('role') +db.createRole({role: 'role2', roles: [], privileges:[]}); + +jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made"); +// Restore with --drop to override the changes to user data +runTool("mongorestore", mongod, + {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"}); +db = mongod.getDB('foo'); +admindb = mongod.getDB('admin'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped"); +assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped"); +assert.eq(1, db.bar.findOne().a); +assert.eq(userCount, db.getUsers().length, "didn't restore users"); +assert.eq("user", db.getUser('user').user, "didn't update user"); +assert.eq(rolesCount, db.getRoles().length, "didn't restore roles"); +assert.eq("role", db.getRole('role').role, "didn't update role"); +assert.docEq(versionDoc, + db.getSiblingDB('admin').system.version.findOne(), + "version 
doc was changed by restore"); + + +jsTestLog("Dump just the admin database. User data should be dumped by default"); +// Make a user in another database to make sure it is properly captured +db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []}); +db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []}); +adminUsersCount += 1; +runTool("mongodump", mongod, {out: dumpDir, db: "admin"}); +db = mongod.getDB('foo'); + +// Change user data a bit. +db.dropAllUsers(); +db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []}); +db.getSiblingDB('admin').dropAllUsers(); + +jsTestLog("Restore just the admin database. User data should be restored by default"); +runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"}); +db = mongod.getDB('foo'); +var otherdb = db.getSiblingDB('bar'); +var admindb = db.getSiblingDB('admin'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(1, db.bar.findOne().a); +assert.eq(userCount, db.getUsers().length, "didn't restore users"); +assert.eq("user", db.getUser('user').user, "didn't restore user"); +assert.eq(rolesCount, db.getRoles().length, "didn't restore roles"); +assert.eq("role", db.getRole('role').role, "didn't restore role"); +assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database"); +assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database"); +assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database"); +assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database"); +assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server"); +assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server"); +assert.docEq(versionDoc, + db.getSiblingDB('admin').system.version.findOne(), + "version doc was changed by restore"); + 
+jsTestLog("Dump all databases"); +runTool("mongodump", mongod, {out: dumpDir}); +db = mongod.getDB('foo'); + +db.dropDatabase(); +db.dropAllUsers(); +db.dropAllRoles(); + +assert.eq(0, db.getUsers().length, "didn't drop users"); +assert.eq(0, db.getRoles().length, "didn't drop roles"); +assert.eq(0, db.bar.count(), "didn't drop 'bar' collection"); + +jsTestLog("Restore all databases"); +runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"}); +db = mongod.getDB('foo'); + +assert.soon(function() { return db.bar.findOne(); }, "no data after restore"); +assert.eq(1, db.bar.findOne().a); +assert.eq(1, db.getUsers().length, "didn't restore users"); +assert.eq(1, db.getRoles().length, "didn't restore roles"); +assert.docEq(versionDoc, + db.getSiblingDB('admin').system.version.findOne(), + "version doc was changed by restore"); + +MongoRunner.stopMongod(mongod); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js new file mode 100644 index 00000000000..7a641542498 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js @@ -0,0 +1,38 @@ +var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} ); + +var nodes = replTest.startSet(); +replTest.initiate(); + +var master = replTest.getPrimary(); +db = master.getDB("foo") +db.foo.save({a: 1000}); +replTest.awaitReplication(); +replTest.awaitSecondaryNodes(); + +assert.eq( 1 , db.foo.count() , "setup" ); + +var slaves = replTest.liveNodes.slaves; +assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length ); +slave = slaves[0]; + +var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/']; +var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword]; +if (jsTest.options().keyFile) { + args = args.concat(authargs); +} +runMongoProgram.apply(null, args); +db.foo.drop() + +assert.eq( 0 , 
db.foo.count() , "after drop" ); +args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/']; +if (jsTest.options().keyFile) { + args = args.concat(authargs); +} +runMongoProgram.apply(null, args) +assert.soon( "db.foo.findOne()" , "no data after sleep" ); +assert.eq( 1 , db.foo.count() , "after restore" ); +assert.eq( 1000 , db.foo.findOne().a , "after restore 2" ); + +resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external') + +replTest.stopSet(15) diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js new file mode 100644 index 00000000000..a7a7bcee90c --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js @@ -0,0 +1,66 @@ +// exportimport1.js + +t = new ToolTest( "exportimport1" ); + +c = t.startDB( "foo" ); +assert.eq( 0 , c.count() , "setup1" ); +var arr = ["x", undefined, "y", undefined]; +c.save( { a : 22 , b : arr} ); +assert.eq( 1 , c.count() , "setup2" ); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );; + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore 2" ); +var doc = c.findOne(); +assert.eq( 22 , doc.a , "after restore 2" ); +for (var i=0; i<arr.length; i++) { + if (typeof arr[i] == 'undefined') { + // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102 + assert.eq( null, doc.b[i] , "after restore array: "+i ); + } else { + assert.eq( arr[i], doc.b[i] , "after restore array: "+i ); + } +} + +// now with --jsonArray + +t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" 
);; + +t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore 2" ); +assert.eq( 22 , c.findOne().a , "after restore 2" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +arr = ["a", undefined, "c"]; +c.save({a : arr}); +assert.eq( 1 , c.count() , "setup2" ); +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );; + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore 2" ); +var doc = c.findOne(); +for (var i=0; i<arr.length; i++) { + if (typeof arr[i] == 'undefined') { + // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102 + assert.eq( null, doc.a[i] , "after restore array: "+i ); + } else { + assert.eq( arr[i], doc.a[i] , "after restore array: "+i ); + } +} + + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js new file mode 100644 index 00000000000..f18ba6cbd4b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js @@ -0,0 +1,27 @@ +// exportimport3.js + +t = new ToolTest( "exportimport3" ); + +c = t.startDB( "foo" ); +assert.eq( 0 , c.count() , "setup1" ); +c.save({a:1}) +c.save({a:2}) +c.save({a:3}) +c.save({a:4}) +c.save({a:5}) + +assert.eq( 5 , c.count() , "setup2" ); + + +t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );; + +t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +assert.soon( 
"c.findOne()" , "no data after sleep" ); +assert.eq( 5 , c.count() , "after restore 2" ); + + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js new file mode 100644 index 00000000000..c0d82a135bc --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js @@ -0,0 +1,57 @@ +// exportimport4.js + + +t = new ToolTest( "exportimport4" ); +c = t.startDB( "foo" ); + +install_test_data = function() { + c.drop(); + + assert.eq( 0 , c.count() , "setup1" ); + + c.save( { a : [1, 2, 3, NaN, 4, null, 5] } ); + c.save( { a : [1, 2, 3, 4, 5] } ); + c.save( { a : [ NaN ] } ); + c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } ); + c.save( { a : [1, 2, 3, 4, null, null, 5, null] } ); + + assert.eq( 5 , c.count() , "setup2" ); +}; + +// attempt to export fields without NaN +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 2 , c.count() , "after restore 1" ); + +// attempt to export fields with NaN +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 3 , c.count() , "after restore 2" ); + +// attempt to export everything +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 5 
, c.count() , "after restore 3" ); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js new file mode 100644 index 00000000000..47dd98c2553 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js @@ -0,0 +1,82 @@ +// exportimport4.js + + +t = new ToolTest( "exportimport5" ); +c = t.startDB( "foo" ); + +install_test_data = function() { + c.drop(); + + assert.eq( 0 , c.count() , "setup1" ); + + c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } ); + c.save( { a : [1, 2, 3, 4, 5] } ); + c.save( { a : [ Infinity ] } ); + c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } ); + c.save( { a : [1, 2, 3, 4, null, null, 5, null] } ); + c.save( { a : [ -Infinity ] } ); + + assert.eq( 6 , c.count() , "setup2" ); +}; + +// attempt to export fields without Infinity +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 3 , c.count() , "after restore 1" ); + +// attempt to export fields with Infinity +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 3 , c.count() , "after restore 2" ); + +// attempt to export fields without -Infinity +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , 
t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 4 , c.count() , "after restore 3" ); + +// attempt to export fields with -Infinity +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 2 , c.count() , "after restore 4" ); + +// attempt to export everything +install_test_data(); + +t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" ); + +c.drop(); +assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" ); + +t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" ); + +assert.eq( 6 , c.count() , "after restore 5" ); + +t.stop(); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js new file mode 100644 index 00000000000..a01d49a9c8b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js @@ -0,0 +1,26 @@ +// exportimport6.js +// test export with skip, limit and sort + +t = new ToolTest("exportimport6"); + +c = t.startDB("foo"); +assert.eq(0, c.count(), "setup1"); +c.save({a:1, b:1}) +c.save({a:1, b:2}) +c.save({a:2, b:3}) +c.save({a:2, b:3}) +c.save({a:3, b:4}) +c.save({a:3, b:5}) + +assert.eq(6, c.count(), "setup2"); + +t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", + "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1"); + +c.drop(); +assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo"); +t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo"); +assert.eq(1, c.count(), "count should be 1"); +assert.eq(5, c.findOne().b, printjson(c.findOne())); + +t.stop(); diff --git 
a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js new file mode 100644 index 00000000000..43a209b8453 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js @@ -0,0 +1,62 @@ +// Test importing collections represented as a single line array above the maximum document size +var tt = new ToolTest('exportimport_bigarray_test'); + +var exportimport_db = tt.startDB(); + +var src = exportimport_db.src; +var dst = exportimport_db.dst; + +src.drop(); +dst.drop(); + +// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe) +var bigString = new Array(1025).toString(); +var doc = {_id: new ObjectId(), x:bigString}; +var docSize = Object.bsonsize(doc); +var numDocs = Math.floor(20*1024*1024 / docSize); + +print('Size of one document: ' + docSize) +print('Number of documents to exceed maximum BSON size: ' + numDocs) + +print('About to insert ' + numDocs + ' documents into ' + + exportimport_db.getName() + '.' + src.getName()); +var i; +for (i = 0; i < numDocs; ++i) { + src.insert({ x : bigString }); +} +var lastError = exportimport_db.getLastError(); +if (lastError == null) { + print('Finished inserting ' + numDocs + ' documents'); +} +else { + doassert('Insertion failed: ' + lastError); +} + +data = 'data/exportimport_array_test.json'; + +print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() + + ' with file: ' + data); +tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(), + '--jsonArray'); + +print('About to call mongoimport on: ' + exportimport_db.getName() + '.' 
+ dst.getName() + + ' with file: ' + data); +tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(), + '--jsonArray'); + +print('About to verify that source and destination collections match'); + +src_cursor = src.find().sort({ _id : 1 }); +dst_cursor = dst.find().sort({ _id : 1 }); + +var documentCount = 0; +while (src_cursor.hasNext()) { + assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' + + 'Destination has ' + documentCount + ' documents.'); + assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount); + ++documentCount; +} +assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' + + 'Source has ' + documentCount + ' documents.'); + +print('Verified that source and destination collections match'); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js new file mode 100644 index 00000000000..57a860ca1a8 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js @@ -0,0 +1,49 @@ +var tt = new ToolTest('exportimport_date_test'); + +var exportimport_db = tt.startDB(); + +var src = exportimport_db.src; +var dst = exportimport_db.dst; + +src.drop(); +dst.drop(); + +// Insert a date that we can format +var formatable = ISODate("1970-01-02T05:00:00Z"); +assert.eq(formatable.valueOf(), 104400000); +src.insert({ "_id" : formatable }); + +// Insert a date that we cannot format as an ISODate string +var nonformatable = ISODate("3001-01-01T00:00:00Z"); +assert.eq(nonformatable.valueOf(), 32535216000000); +src.insert({ "_id" : nonformatable }); + +// Verify number of documents inserted +assert.eq(2, src.find().itcount()); + +data = 'data/exportimport_date_test.json'; + +print('About to call mongoexport on: ' + exportimport_db.getName() + '.' 
+ src.getName() + + ' with file: ' + data); +tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName()); + +print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() + + ' with file: ' + data); +tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName()); + +print('About to verify that source and destination collections match'); + +src_cursor = src.find().sort({ _id : 1 }); +dst_cursor = dst.find().sort({ _id : 1 }); + +var documentCount = 0; +while (src_cursor.hasNext()) { + assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' + + 'Destination has ' + documentCount + ' documents.'); + assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount); + ++documentCount; +} +assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' + + 'Source has ' + documentCount + ' documents.'); + +print('Verified that source and destination collections match'); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/files1.js b/src/mongo/gotools/test/legacy26/jstests/tool/files1.js new file mode 100644 index 00000000000..acfcc16dcc3 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/files1.js @@ -0,0 +1,27 @@ +// files1.js + +t = new ToolTest( "files1" ) + +db = t.startDB(); + +filename = 'mongod' +if ( _isWindows() ) + filename += '.exe' + +t.runTool( "files" , "-d" , t.baseName , "put" , filename ); +md5 = md5sumFile(filename); + +file_obj = db.fs.files.findOne() +assert( file_obj , "A 0" ); +md5_stored = file_obj.md5; +md5_computed = db.runCommand({filemd5: file_obj._id}).md5; +assert.eq( md5 , md5_stored , "A 1" ); +assert.eq( md5 , md5_computed, "A 2" ); + +mkdir(t.ext); + +t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile ); +md5 = md5sumFile(t.extFile); +assert.eq( md5 , md5_stored , "B" ); + +t.stop() diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js 
b/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js new file mode 100644 index 00000000000..e9a002bfb65 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js @@ -0,0 +1,26 @@ +// oplog1.js + +// very basic test for mongooplog +// need a lot more, but test that it functions at all + +t = new ToolTest( "oplog1" ); + +db = t.startDB(); + +output = db.output + +doc = { _id : 5 , x : 17 }; + +db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } ); + +assert.eq( 0 , output.count() , "before" ) + +t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" ); + +assert.eq( 1 , output.count() , "after" ); + +assert.eq( doc , output.findOne() , "after check" ); + +t.stop(); + + diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js b/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js new file mode 100644 index 00000000000..8f231cb233d --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js @@ -0,0 +1,61 @@ +/** + * Performs a simple test on mongooplog by doing different types of operations + * that will show up in the oplog then replaying it on another replica set. + * Correctness is verified using the dbhash command. 
+ */ + +var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' }, + { arbiter: true }, { arbiter: true }]}); + +repl1.startSet({ oplogSize: 10 }); +repl1.initiate(); +repl1.awaitSecondaryNodes(); + +var repl1Conn = new Mongo(repl1.getURL()); +var testDB = repl1Conn.getDB('test'); +var testColl = testDB.user; + +// op i +testColl.insert({ x: 1 }); +testColl.insert({ x: 2 }); + +// op c +testDB.dropDatabase(); + +testColl.insert({ y: 1 }); +testColl.insert({ y: 2 }); +testColl.insert({ y: 3 }); + +// op u +testColl.update({}, { $inc: { z: 1 }}, true, true); + +// op d +testColl.remove({ y: 2 }); + +// op n +var oplogColl = repl1Conn.getCollection('local.oplog.rs'); +oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }}); + +var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' }, + { arbiter: true }, { arbiter: true }]}); + +repl2.startSet({ oplogSize: 10 }); +repl2.initiate(); +repl2.awaitSecondaryNodes(); + +var srcConn = repl1.getPrimary(); +runMongoProgram('mongooplog', '--from', repl1.getPrimary().host, + '--host', repl2.getPrimary().host); + +var repl1Hash = testDB.runCommand({ dbhash: 1 }); + +var repl2Conn = new Mongo(repl2.getURL()); +var testDB2 = repl2Conn.getDB(testDB.getName()); +var repl2Hash = testDB2.runCommand({ dbhash: 1 }); + +assert(repl1Hash.md5); +assert.eq(repl1Hash.md5, repl2Hash.md5); + +repl1.stopSet(); +repl2.stopSet(); + diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js b/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js new file mode 100644 index 00000000000..ac9e7bc756b --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js @@ -0,0 +1,113 @@ +/* SERVER-4972 + * Test for mongorestore on server with --auth allows restore without credentials of colls + * with no index + */ +/* + * 1) Start mongo without auth. 
+ * 2) Write to collection + * 3) Take dump of the collection using mongodump. + * 4) Drop the collection. + * 5) Stop mongod from step 1. + * 6) Restart mongod with auth. + * 7) Add admin user to kick authentication + * 8) Try restore without auth credentials. The restore should fail + * 9) Try restore with correct auth credentials. The restore should succeed this time. + */ + +var port = allocatePorts(1)[0]; +baseName = "jstests_restorewithauth"; +var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", + "--nojournal", "--bind_ip", "127.0.0.1" ); + +// write to ns foo.bar +var foo = conn.getDB( "foo" ); +for( var i = 0; i < 4; i++ ) { + foo["bar"].save( { "x": i } ); + foo["baz"].save({"x": i}); +} + +// make sure the collection exists +assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 ) + +//make sure it has no index except _id +assert.eq(foo.system.indexes.count(), 2); + +foo.bar.createIndex({x:1}); +assert.eq(foo.system.indexes.count(), 3); + +// get data dump +var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/"; +resetDbpath( dumpdir ); +x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir); + +// now drop the db +foo.dropDatabase(); + +// stop mongod +stopMongod( port ); + +// start mongod with --auth +conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", + "--nojournal", "--bind_ip", "127.0.0.1" ); + +// admin user +var admin = conn.getDB( "admin" ) +admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles}); +admin.auth( "admin" , "admin" ); + +var foo = conn.getDB( "foo" ) + +// make sure no collection with the same name exists +assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0); +assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0); + +// now try to restore dump +x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, 
"-vvvvv" ); + +// make sure that the collection isn't restored +assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0); +assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0); + +// now try to restore dump with correct credentials +x = runMongoProgram( "mongorestore", + "-h", "127.0.0.1:" + port, + "-d", "foo", + "--authenticationDatabase=admin", + "-u", "admin", + "-p", "admin", + "--dir", dumpdir + "foo/", + "-vvvvv"); + +// make sure that the collection was restored +assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1); +assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1); + +// make sure the collection has 4 documents +assert.eq(foo.bar.count(), 4); +assert.eq(foo.baz.count(), 4); + +foo.dropDatabase(); + +// make sure that the collection is empty +assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0); +assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0); + +foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}); + +// now try to restore dump with foo database credentials +x = runMongoProgram("mongorestore", + "-h", "127.0.0.1:" + port, + "-d", "foo", + "-u", "user", + "-p", "password", + "--dir", dumpdir + "foo/", + "-vvvvv"); + +// make sure that the collection was restored +assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1); +assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1); +assert.eq(foo.bar.count(), 4); +assert.eq(foo.baz.count(), 4); +assert.eq(foo.system.indexes.count(), 3); // _id on foo, _id on bar, x on foo + +stopMongod( port ); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js b/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js new file mode 100644 index 00000000000..539827e1704 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js @@ -0,0 +1,22 @@ +// stat1.js +// test mongostat with authentication SERVER-3875 +port = allocatePorts( 1 )[ 0 ]; +baseName = "tool_stat1"; + +m = startMongod( "--auth", "--port", port, 
"--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" ); +db = m.getDB( "admin" ); + +t = db[ baseName ]; +t.drop(); + +db.dropAllUsers(); + +db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles}); + +assert( db.auth( "eliot" , "eliot" ) , "auth failed" ); + +x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase=admin" ); +assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot"); + +x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase=admin" ); +assert.eq(x, 1, "mongostat should exit with 1 with eliot:wrong"); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js b/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js new file mode 100644 index 00000000000..f7c6f769e72 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js @@ -0,0 +1,44 @@ +// mongo tool tests, very basic to start with + + +baseName = "jstests_tool_tool1"; +dbPath = MongoRunner.dataPath + baseName + "/"; +externalPath = MongoRunner.dataPath + baseName + "_external/"; +externalBaseName = "export.json"; +externalFile = externalPath + externalBaseName; + +function fileSize(){ + var l = listFiles( externalPath ); + for ( var i=0; i<l.length; i++ ){ + if ( l[i].baseName == externalBaseName ) + return l[i].size; + } + return -1; +} + + +port = allocatePorts( 1 )[ 0 ]; +resetDbpath( externalPath ); + +m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" ); +c = m.getDB( baseName ).getCollection( baseName ); +c.save( { a: 1 } ); +assert( c.findOne() ); + +runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath ); +c.drop(); +runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath ); 
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" ); +assert( c.findOne() , "mongodump then restore has no data" ); +assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" ); + +resetDbpath( externalPath ); + +assert.eq( -1 , fileSize() , "mongoexport prep invalid" ); +runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile ); +assert.lt( 10 , fileSize() , "file size changed" ); + +c.drop(); +runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile ); +assert.soon( "c.findOne()" , "mongo import json A" ); +assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" ); diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js b/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js new file mode 100644 index 00000000000..bc50a0fd7d4 --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js @@ -0,0 +1,89 @@ +/* + * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string + * 1. Start a replica set. + * 2. Add data to a collection. + * 3. Take a dump of the database. + * 4. Drop the db. + * 5. Restore the db. + * 6. Export a collection. + * 7. Drop the collection. + * 8. Import the collection. + * 9. Add data to the oplog.rs collection. + * 10. Ensure that the document doesn't exist yet. + * 11. Now play the mongooplog tool. + * 12. 
Make sure that the oplog was played +*/ + +// Load utility methods for replica set tests +load("jstests/replsets/rslib.js"); + +print("starting the replica set") + +var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 }); +var nodes = replTest.startSet(); +replTest.initiate(); +var master = replTest.getPrimary(); +for (var i = 0; i < 100; i++) { + master.getDB("foo").bar.insert({ a: i }); +} +replTest.awaitReplication(); + +var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] + + ",127.0.0.1:" + replTest.ports[1]; + +// Test with mongodump/mongorestore +print("dump the db"); +var data = MongoRunner.dataDir + "/tool_replset-dump1/"; +runMongoProgram("mongodump", "--host", replSetConnString, "--out", data); + +print("db successfully dumped, dropping now"); +master.getDB("foo").dropDatabase(); +replTest.awaitReplication(); + +print("restore the db"); +runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data); + +print("db successfully restored, checking count") +var x = master.getDB("foo").getCollection("bar").count(); +assert.eq(x, 100, "mongorestore should have successfully restored the collection"); + +replTest.awaitReplication(); + +// Test with mongoexport/mongoimport +print("export the collection"); +var extFile = MongoRunner.dataDir + "/tool_replset/export"; +runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile, + "-d", "foo", "-c", "bar"); + +print("collection successfully exported, dropping now"); +master.getDB("foo").getCollection("bar").drop(); +replTest.awaitReplication(); + +print("import the collection"); +runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile, + "-d", "foo", "-c", "bar"); + +var x = master.getDB("foo").getCollection("bar").count(); +assert.eq(x, 100, "mongoimport should have successfully imported the collection"); + +// Test with mongooplog +var doc = { _id : 5, x : 17 }; +master.getDB("local").oplog.rs.insert({ ts : new 
Timestamp(), "op" : "i", "ns" : "foo.bar", + "o" : doc }); + +assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " + + "was not 100 as expected"); + +runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0], + "--host", replSetConnString); + +print("running mongooplog to replay the oplog") + +assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " + + "was not 101 as expected") + +print("all tests successful, stopping replica set") + +replTest.stopSet(); + +print("replica set stopped, test complete") diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js b/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js new file mode 100644 index 00000000000..1b0ddbb7c9e --- /dev/null +++ b/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js @@ -0,0 +1,32 @@ +// tsv1.js + +t = new ToolTest( "tsv1" ) + +c = t.startDB( "foo" ); + +base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 }; + +t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" ); +assert.soon( "2 == c.count()" , "restore 2" ); + +a = c.find().sort( { a : 1 } ).toArray(); +delete a[0]._id +delete a[1]._id + +assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" ); +assert.eq( base , a[0] , "tsv parse 0" ) + +c.drop() +assert.eq( 0 , c.count() , "after drop 2" ) + +t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" ) +assert.soon( "c.findOne()" , "no data after sleep" ); +assert.eq( 1 , c.count() , "after restore 2" ); + +x = c.findOne() +delete x._id; +assert.eq( base , x , "tsv parse 2" ) + + + +t.stop() |