author     Sam Helman <sam.helman@10gen.com>  2014-11-21 13:50:56 -0500
committer  Sam Helman <sam.helman@10gen.com>  2014-11-21 14:31:54 -0500
commit     ed1065a6084728d6b21cd321e086350595f8f8cd (patch)
tree       f43a489a607312742715dff729f6bffb28af78ad
parent     c32e7643152c7c5f21078a7e45ad5ebc68e0feb0 (diff)
download   mongo-ed1065a6084728d6b21cd321e086350595f8f8cd.tar.gz
TOOLS-403 wip: added starter qa-tests task
Former-commit-id: 67ec539c19073697f3e0b13a2e2492ae24d7a474
-rw-r--r--  common.yml                                    44
-rw-r--r--  test/qa-tests/buildscripts/buildlogger.py    479
-rw-r--r--  test/qa-tests/buildscripts/cleanbb.py        105
-rwxr-xr-x  test/qa-tests/buildscripts/smoke.py         1343
-rw-r--r--  test/qa-tests/buildscripts/utils.py          235
5 files changed, 2206 insertions, 0 deletions
diff --git a/common.yml b/common.yml
index bd5b2397ab0..0423fe601ac 100644
--- a/common.yml
+++ b/common.yml
@@ -889,6 +889,50 @@ tasks:
value: "common/progress"
- func: "run unit test"
+- name: qa-tests
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ mv ./mongodb/mongod${extension} .
+ mv ./mongodb/mongo${extension} .
+ mv test/qa-tests/* .
+ chmod +x mongo*
+ python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} --authMechanism SCRAM-SHA-1 mongorestore
+
- name: text
commands:
- command: git.get_project
diff --git a/test/qa-tests/buildscripts/buildlogger.py b/test/qa-tests/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..d2466e495c0
--- /dev/null
+++ b/test/qa-tests/buildscripts/buildlogger.py
@@ -0,0 +1,479 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a bare filename (no directory component), the
+directories one, two, and three levels above this script are searched, in that
+order; a path that includes a directory component is used as given.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from the credentials file
+# (BUILDLOGGER_CREDENTIALS, or buildbot.tac by default), which may be
+# one, two, or three directories up from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+ and returns None.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches of up to 2000 lines or 10 seconds to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+ while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches of up to 2000 lines or 10 seconds to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
diff --git a/test/qa-tests/buildscripts/cleanbb.py b/test/qa-tests/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/test/qa-tests/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
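+# Rough illustration of what shouldKill() matches (the paths are hypothetical):
+# with root="/data/db/sconsTests/", a process-listing line such as
+#   /usr/bin/mongod --dbpath /data/db/sconsTests/ --port 27999
+# satisfies the --dbpath regex above and gets killed, while lines containing
+# smoke.py, emr.py, or java are always left alone.
+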
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/test/qa-tests/buildscripts/smoke.py b/test/qa-tests/buildscripts/smoke.py
new file mode 100755
index 00000000000..f43c7fbcf5b
--- /dev/null
+++ b/test/qa-tests/buildscripts/smoke.py
@@ -0,0 +1,1343 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import Connection
+from pymongo.errors import OperationFailure
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
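+# The file named by --report-file is just test_report serialized as JSON; an
+# illustrative (hypothetical) entry in "results" looks roughly like:
+#   {"test_file": "jstests/core/example.js", "status": "pass", "exit_code": 0,
+#    "start": 1416595000.0, "end": 1416595004.5, "elapsed": 4.5}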
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
+
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+ threads = threading.enumerate();
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+ # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
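+# For illustration: with MONGO_USE_BUILDLOGGER=true, a command such as
+# ["./mongod", "--port", "27999"] comes back as (roughly)
+# [<python>, "buildscripts/buildlogger.py", "-g", "./mongod", "--port", "27999"],
+# where <python> is whatever utils.find_python() returns.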
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
+ if authMechanism != 'MONGODB-CR':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+ argv += ['--clusterAuthMode','x509'];
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = Connection(port=self.port, slave_okay=True).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif hasattr(self.proc, "terminate"):
+ # This method added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def wait_for_repl(self):
+ Connection(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ mongod.dbhash = Connection(port=mongod.port, slave_okay=True).test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = Connection(port=master.port, slave_okay=True).test
+ sTestDB = Connection(port=slave.port, slave_okay=True).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+ mOplog = mTestDB.connection.local["oplog.$main"];
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+ return True;
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe", "perftest", "perftest.exe"]:
+ argv = [path]
+ # default data directory for test and perftest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ argv = argv + [ '--eval', evalString]
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+ break;
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if start_mongod and not is_mongod_still_up:
+ print "mongod is not running after test"
+ result["mongod_running_at_end"] = is_mongod_still_up;
+ raise TestServerFailure(path)
+
+ result["mongod_running_at_end"] = is_mongod_still_up;
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True, set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+ primary = Connection(port=master.port, slave_okay=True);
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+ result = primary.admin.command("ismaster");
+ ismaster = result["ismaster"]
+ time.sleep(1)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "All docs matched!"
+ else:
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "mmap_v1": ("mmap_v1/*.js", True),
+ "gle": ("gle/*.js", True),
+ "rocksDB": ("rocksDB/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
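+# For example, the suite name "js" resolves (via expand_suites below) to the glob
+# jstests/core/*.js with usedb=True, while "noPassthrough" resolves to
+# jstests/noPassthrough/*.js with usedb=False (those tests are run with --nodb).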
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'perf',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'perf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+ #Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames (e.g., of binaries to execute), so until
+ # that changes we don't have the freedom to run from an arbitrary directory.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run the tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before running tests')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated (use --shell-write-mode instead): sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default: commands)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+ # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+ if options.ignore_files != None :
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/test/qa-tests/buildscripts/utils.py b/test/qa-tests/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/test/qa-tests/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+ return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
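+# didMongodStart polls the mongod port once per second until a connection
+# succeeds or roughly `timeout` seconds have elapsed.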
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
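+# A rough usage sketch (the suite name below is hypothetical, not something
+# this change defines):
+#   smoke_command('--auth', 'jstests')
+# would return something like
+#   ['/usr/bin/python', '/path/to/buildscripts/smoke.py', '--with-cleanbb', '--auth', 'jstests']
+# which can then be handed to subprocess.call() or to run_smoke_command below.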
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
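+# A hedged sketch of how this might be attached to a SCons Alias (the alias
+# name and argument are assumptions for illustration only):
+#   env.Alias("smoke", [], run_smoke_command("--continue-on-failure"))
+# The extra list wrapping keeps SCons from treating the command words as
+# dependency nodes.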
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+ # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
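+# For instance (a sketch, assuming Python 2 byte strings):
+#   unicode_dammit('caf\xc3\xa9')   -> u'caf\xe9'
+#   unicode_dammit('bad \xff byte') -> u'bad \\xff byte'  (invalid byte kept as its repr)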
+