summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAaron <aaron@10gen.com>2010-04-08 00:16:37 -0700
committerAaron <aaron@10gen.com>2010-04-08 00:16:37 -0700
commitec721d26e0ca3fe324ae5e67f54b87f6714abe4a (patch)
treed33666328d1c8852043cf73aaa3340a395fdd6c0
parente18041f47b0b3a885f9e54e35925572c2e28995a (diff)
parent5cad0d6a80c2b94e877f56bc7f67b3ce825ee094 (diff)
downloadmongo-ec721d26e0ca3fe324ae5e67f54b87f6714abe4a.tar.gz
Merge branch 'master' of github.com:mongodb/mongo
-rw-r--r--SConstruct2
-rwxr-xr-xbuildscripts/makealldists.py263
-rwxr-xr-x[-rw-r--r--]buildscripts/makedist.py136
-rwxr-xr-xbuildscripts/mergerepositories.py163
-rw-r--r--client/dbclient.cpp38
-rw-r--r--client/dbclient.h1
-rw-r--r--db/client.cpp17
-rw-r--r--db/client.h2
-rw-r--r--db/clientcursor.cpp1
-rw-r--r--db/db.cpp2
-rw-r--r--db/db.vcproj4
-rw-r--r--db/jsobj.cpp27
-rw-r--r--db/namespace.cpp3
-rw-r--r--db/pdfile.cpp6
-rw-r--r--db/pdfile.h16
-rw-r--r--dbtests/socktests.cpp1
-rw-r--r--dbtests/test.vcproj4
-rw-r--r--jstests/pullall2.js20
-rw-r--r--jstests/repl/snapshot3.js4
-rw-r--r--s/config.cpp2
-rw-r--r--shell/dbshell.cpp8
-rw-r--r--util/goodies.h4
-rw-r--r--util/message.cpp8
-rw-r--r--util/message.h7
-rw-r--r--util/miniwebserver.cpp2
-rw-r--r--util/mmap.h14
-rw-r--r--util/ramstore.cpp93
-rw-r--r--util/ramstore.h40
-rw-r--r--util/sock.cpp61
-rw-r--r--util/sock.h26
30 files changed, 818 insertions, 157 deletions
diff --git a/SConstruct b/SConstruct
index 8195f7726f4..cb0dea589f2 100644
--- a/SConstruct
+++ b/SConstruct
@@ -360,7 +360,7 @@ if GetOption( "extralib" ) is not None:
# ------ SOURCE FILE SETUP -----------
commonFiles = Split( "stdafx.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.cpp db/lasterror.cpp db/nonce.cpp db/queryutil.cpp shell/mongo.cpp" )
-commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
+commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/ramstore.cpp", "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
"util/assert_util.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/debug_util.cpp",
"util/thread_pool.cpp" ]
commonFiles += Glob( "util/*.c" )
diff --git a/buildscripts/makealldists.py b/buildscripts/makealldists.py
new file mode 100755
index 00000000000..0a3c5207a08
--- /dev/null
+++ b/buildscripts/makealldists.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+import subprocess
+import sys
+import os
+import time
+import tempfile
+import errno
+import glob
+import shutil
+import settings
+import simples3
+
+def s3bucket():
+ return simples3.S3Bucket(settings.bucket, settings.id, settings.key)
+
+def s3cp (bucket, filename, s3name):
+ defaultacl="public-read"
+ bucket.put(s3name, open(filename, "rb").read(), acl=defaultacl)
+
+def pushrepo(repodir):
+ files=subprocess.Popen(['find', repodir, '-name', 'Packages*', '-o', '-name', '*.deb', '-o', '-name', 'Release*'], stdout=subprocess.PIPE).communicate()[0][:-1].split('\n')
+ bucket=s3bucket()
+ olddebs=[t[0] for t in bucket.listdir(prefix='distros/') if t[0].endswith('.deb')]
+ for fn in files:
+ tail = fn[len(repodir):]
+ # Note: be very careful not to produce s3names containing
+ # sequences of repeated slashes: s3 doesn't treat a////b as
+ # equivalent to a/b.
+ s3name='distros-archive/'+time.strftime('%Y%m%d')+tail
+ #print fn, s3name
+ s3cp(bucket, fn, s3name)
+ s3name='distros'+tail
+ s3cp(bucket, fn, s3name)
+ # FIXME: delete the old
+ [bucket.delete(olddeb) for olddeb in olddebs]
+
+ shutil.rmtree(outputroot)
+ shutil.rmtree(mergedir)
+ shutil.rmtree(repodir)
+
+def cat (inh, outh):
+ inh.seek(0)
+ for line in inh:
+ outh.write(line)
+ inh.close()
+
+# This generates all tuples from a mixed-radix counting system, essentially.
+def gen(listlist):
+ dim=len(listlist)
+ a=[0 for ignore in listlist]
+ while True:
+ yield [listlist[i][a[i]] for i in range(dim)]
+ a[0]+=1
+ for j in range(dim):
+ if a[j] == len(listlist[j]):
+ if j<dim-1:
+ a[j+1]+=1
+ else:
+ return
+ a[j]=0
+
+def dirify(string):
+ return (string if string[-1:] in '\/' else string+'/')
+def fileify(string):
+ return (string if string[-1:] not in '\/' else string.rstrip('\/'))
+
+# WTF: os.makedirs errors if the leaf exists?
+def makedirs(f):
+ try:
+ os.makedirs(f)
+ except OSError as exc: # Python >2.5
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise exc
+
+
+
+# This is a fairly peculiar thing to want to do, but our build process
+# creates several apt repositories for each mongo version we build on
+# any given Debian/Ubuntu release. To merge repositories together, we
+# must concatenate the Packages.gz files.
+def merge_directories_concatenating_conflicts (target, sources):
+ print sources
+ target = dirify(target)
+ for source in sources:
+ source = dirify(source)
+ files = subprocess.Popen(["find", source, "-type", "f"], stdout=subprocess.PIPE).communicate()[0].split('\n')
+ for f in files:
+ if f == '':
+ continue
+ rel = f[len(source):]
+ o=target+rel
+ makedirs(os.path.dirname(o))
+ with open(f) as inh:
+ with open(target+rel, "a") as outh:
+ outh.write(inh.read())
+
+
+def parse_mongo_version_spec(spec):
+ l = spec.split(':')
+ if len(l) == 1:
+ l+=['','']
+ elif len(l) == 2:
+ l+=['']
+ return l
+
+def logfh(distro, distro_version, arch, mongo_version):
+ prefix = "%s-%s-%s-%s.log." % (distro, distro_version, arch, mongo_version)
+ # This is a NamedTemporaryFile mostly so that I can tail(1) them
+ # as we go.
+ return tempfile.NamedTemporaryFile("w+b", -1, prefix=prefix)
+
+def spawn(distro, distro_version, arch, spec, directory, opts):
+ (mongo_version, suffix, pkg_version) = parse_mongo_version_spec(spec)
+ argv = ["makedist.py"] + opts + [ directory, distro, distro_version, arch ] + [ spec ]
+# cmd = "mkdir -p %s; cd %s; touch foo.deb; echo %s %s %s %s %s | tee Packages " % ( directory, directory, directory, distro, distro_version, arch, mongo_version )
+# print cmd
+# argv = ["sh", "-c", cmd]
+ fh = logfh(distro, distro_version, arch, mongo_version)
+ print >> fh, "Running %s" % argv
+ # it's often handy to be able to run these things at the shell
+ # manually. FIXME: this ought to be slightly less than thoroughly
+    # ignorant of quoting issues (as it is now).
+ print >> fh, " ".join(argv)
+ fh.flush()
+ proc = subprocess.Popen(argv, stdin=None, stdout=fh, stderr=fh)
+ return (proc, fh, distro, distro_version, arch, spec)
+
+def win(name, logfh, winfh):
+ logfh.seek(0)
+ print >> winfh, "=== Winner %s ===" % name
+ cat(logfh, winfh)
+ print >> winfh, "=== End winner %s ===" % name
+
+def lose(name, logfh, losefh):
+ logfh.seek(0)
+ print >> losefh, "=== Loser %s ===" % name
+ cat(logfh, losefh)
+ print >> losefh, "=== End loser %s ===" % name
+
+def wait(procs, winfh, losefh, winners, losers):
+ try:
+ (pid, stat) = os.wait()
+ except OSError, err:
+ print >> sys.stderr, "This shouldn't happen."
+ print >> sys.stderr, err
+ next
+ if pid:
+ [tup] = [tup for tup in procs if tup[0].pid == pid]
+ (proc, logfh, distro, distro_version, arch, mongo_version) = tup
+ procs.remove(tup)
+ name = "%s %s %s %s" % (distro, distro_version, arch, mongo_version)
+ if os.WIFEXITED(stat):
+ if os.WEXITSTATUS(stat) == 0:
+ win(name, logfh, winfh)
+ winners.append(name)
+ else:
+ lose(name, logfh, losefh)
+ losers.append(name)
+ if os.WIFSIGNALED(stat):
+ lose(name, logfh, losefh)
+ losers.append(name)
+
+
+
+def __main__():
+ # FIXME: getopt & --help.
+ print " ".join(sys.argv)
+ branches = sys.argv[-1]
+ makedistopts = sys.argv[1:-1]
+
+ # Output from makedist.py goes here.
+ outputroot=tempfile.mkdtemp()
+ mergedir=tempfile.mkdtemp()
+ repodir=tempfile.mkdtemp()
+
+ print "makedist output under: %s\nmerge directory: %s\ncombined repo: %s\n" % (outputroot, mergedir, repodir)
+ # Add more dist/version/architecture tuples as they're supported.
+ dists = (("ubuntu", "10.4"),
+ ("ubuntu", "9.10"),
+ ("ubuntu", "9.4"),
+ ("ubuntu", "8.10"),
+ ("debian", "5.0"))
+ arches = ("x86", "x86_64")
+ mongos = branches.split(',')
+ # Run a makedist for each distro/version/architecture tuple above.
+ winners = []
+ losers = []
+ winfh=tempfile.TemporaryFile()
+ losefh=tempfile.TemporaryFile()
+ procs = []
+ count = 0
+ for ((distro, distro_version), arch, spec) in gen([dists, arches, mongos]):
+ count+=1
+ (mongo_version,_,_) = parse_mongo_version_spec(spec)
+ # blech: the "Packages.gz" metadata files in a Debian
+ # repository will clobber each other unless we make a
+ # different "repository" for each mongo version we're
+ # building.
+ if distro in ["debian", "ubuntu"]:
+ outputdir = "%s/%s/%s" % (outputroot, mongo_version, distro)
+ else:
+ outputdir = outputroot
+ makedistopts += "--subdirs"
+
+ procs.append(spawn(distro, distro_version, arch, spec, outputdir, makedistopts))
+
+ if len(procs) == 8:
+ wait(procs, winfh, losefh, winners, losers)
+
+ while procs:
+ wait(procs, winfh, losefh, winners, losers)
+
+ winfh.seek(0)
+ losefh.seek(0)
+ nwinners=len(winners)
+ nlosers=len(losers)
+ print "%d winners; %d losers" % (nwinners, nlosers)
+ cat(winfh, sys.stdout)
+ cat(losefh, sys.stdout)
+ print "%d winners; %d losers" % (nwinners, nlosers)
+ if count == nwinners + nlosers:
+ print "All jobs accounted for"
+# return 0
+ else:
+ print "Lost some jobs...?"
+ return 1
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ merge_directories_concatenating_conflicts(mergedir, glob.glob(outputroot+'/*'))
+
+ argv=["mergerepositories.py", mergedir, repodir]
+ print "running %s" % argv
+ print " ".join(argv)
+ r = subprocess.Popen(argv).wait()
+ if r != 0:
+ raise Exception("mergerepositories.py exited %d" % r)
+ print repodir
+ pushrepo(repodir)
+
+ return 0
+
+
+if __name__ == '__main__':
+ __main__()
+
+
+# FIXME: this ought to be someplace else.
+
+# FIXME: remove this comment when the buildbot does this. After this
+# program, run something that amounts to
+#
+# find /tmp/distros -name *.deb -or -name Packages.gz | while read f; do echo "./s3cp.py $f ${f#/tmp/}"; done
+#
+# where ./s3cp.py is a trivial s3 put executable in this directory.
+
+# merge_directories_concatenating_conflicts('/tmp/distros/debian', '/tmp/distros-20100222/debian/HEAD', '/tmp/distros-20100222/debian/r1.3.2','/tmp/distros-20100222/debian/v1.2')
+
+# merge_directories_concatenating_conflicts('/tmp/distros/ubuntu', '/tmp/distros-20100222/ubuntu/HEAD', '/tmp/distros-20100222/ubuntu/r1.3.2', '/tmp/distros-20100222/ubuntu/v1.2')
diff --git a/buildscripts/makedist.py b/buildscripts/makedist.py
index 35383b9f7b8..cfdcb58b752 100644..100755
--- a/buildscripts/makedist.py
+++ b/buildscripts/makedist.py
@@ -287,6 +287,7 @@ class SshConnectionConfigurator (BaseConfigurator):
(("ubuntu", "9.4", "*"), "root"),
(("ubuntu", "8.10", "*"), "root"),
(("ubuntu", "8.4", "*"), "ubuntu"),
+ (("fedora", "8", "*"), "root"),
(("centos", "*", "*"), "root"))),
]
@@ -402,7 +403,7 @@ s/^Package:.*mongodb/Package: {pkg_name}{pkg_name_suffix}\\
Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|$(CURDIR)/debian/mongodb/|$(CURDIR)/debian/{pkg_name}{pkg_name_suffix}/|g' debian/rules) || exit 1
( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's|debian/mongodb.manpages|debian/{pkg_name}{pkg_name_suffix}.manpages|g' debian/rules) || exit 1
-( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}/; /^Version:/s/.*/Version: {pkg_version}/;' rpm/mongo.spec )
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/^Name:/s/.*/Name: {pkg_name}{pkg_name_suffix}/; /^Version:/s/.*/Version: {pkg_version}/; /Requires.*mongo/s/mongo/{pkg_name}{pkg_name_suffix}/;' rpm/mongo.spec )
# Debian systems require some ridiculous workarounds to get an init
# script at /etc/init.d/mongodb when the packge name isn't the init
# script name. Note: dh_installinit --name won't work, because that
@@ -412,6 +413,20 @@ Conflicts: {pkg_name_conflicts}/' debian/control; ) || exit 1
ln debian/init.d debian/{pkg_name}{pkg_name_suffix}.mongodb.init &&
ln debian/mongodb.upstart debian/{pkg_name}{pkg_name_suffix}.mongodb.upstart &&
sed -i 's/dh_installinit/dh_installinit --name=mongodb/' debian/rules) || exit 1
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
+"""
+
+ # If we're just packaging up nightlies, do this:
+ nightly_build_mangle_files="""
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i '/scons[[:space:]]*$/d; s^scons.*install^mkdir -p debian/{pkg_name}{pkg_name_suffix} \&\& wget http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG mv -v ARG debian/{pkg_name}{pkg_name_suffix}/usr \&\& (rm debian/{pkg_name}{pkg_name_suffix}/usr/bin/mongosniff || true)^' debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/^BuildRequires:.*//; s/scons.*\ -c//; s/scons.*\ all//; s^scons.*install^(mkdir -p $RPM_BUILD_ROOT/usr ; cd /tmp \&\& curl http://downloads.mongodb.org/linux/mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz > mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& tar xzvf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz \&\& find `tar tzf mongodb-linux-{mongo_arch}-{mongo_pub_version}.tgz | sed "s|/.*||" | sort -u | head -n1` -mindepth 1 -maxdepth 1 -type d | xargs -n1 -IARG cp -pRv ARG $RPM_BUILD_ROOT/usr \&\& (rm $RPM_BUILD_ROOT/usr/bin/mongosniff || true))^' rpm/mongo.spec)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat debian/rules)
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && cat rpm/mongo.spec)
+"""
+#$RPM_BUILD_ROOT/usr/lib/libmongoclient.a $RPM_BUILD_ROOT/usr/lib64/libmongoclient.a
+ mangle_files_for_new_deb_xulrunner_commands = """
+( cd "{pkg_name}{pkg_name_suffix}-{pkg_version}" && sed -i 's/xulrunner-dev/xulrunner-1.9.1-dev/g' debian/control )
"""
mangle_files_for_ancient_redhat_commands = """
@@ -446,9 +461,10 @@ yum -y install {pkg_prereq_str}
"""
rpm_build_commands="""
for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do mkdir -p /usr/src/redhat/$d; done
-cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" /usr/src/redhat/SPECS
+cp -v "{pkg_name}{pkg_name_suffix}-{pkg_version}/rpm/mongo.spec" /usr/src/redhat/SPECS/{pkg_name}{pkg_name_suffix}.spec
tar -cpzf /usr/src/redhat/SOURCES/"{pkg_name}{pkg_name_suffix}-{pkg_version}".tar.gz "{pkg_name}{pkg_name_suffix}-{pkg_version}"
-rpmbuild -ba /usr/src/redhat/SPECS/mongo.spec
+rpmbuild -ba /usr/src/redhat/SPECS/{pkg_name}{pkg_name_suffix}.spec
+# FIXME: should install the rpms, check if mongod is running.
"""
# FIXME: this is clean, but adds 40 minutes or so to the build process.
old_rpm_precommands = """
@@ -474,25 +490,28 @@ rpm -ivh /usr/src/redhat/RPMS/{distro_arch}/boost-devel-1.38.0-1.{distro_arch}.r
# On very old Debianoids, libboost-<foo>-dev will be some old
# boost that's not as thready as we want, but which Eliot says
- # will work.
- very_old_deb_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner1.9-dev"]
+    # will work; on very new Debianoids, libboost-<foo>-dev is what we
+ # want.
+ unversioned_deb_boost_prereqs = ["libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev"]
+ # On some in-between Debianoids, libboost-<foo>-dev is still a
+ # 1.34, but 1.35 packages are available, so we want those.
+ versioned_deb_boost_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev"]
- # On less old (but still old!) Debianoids, libboost-<foo>-dev is
- # still a 1.34, but 1.35 packages are available, so we want those.
- old_deb_prereqs = ["libboost-thread1.35-dev", "libboost-filesystem1.35-dev", "libboost-program-options1.35-dev", "libboost-date-time1.35-dev", "libboost1.35-dev", "xulrunner-dev"]
+ unversioned_deb_xulrunner_prereqs = ["xulrunner-dev"]
- # On newer Debianoids, libbost-<foo>-dev is some sufficiently new
- # thing.
- new_deb_prereqs = [ "libboost-thread-dev", "libboost-filesystem-dev", "libboost-program-options-dev", "libboost-date-time-dev", "libboost-dev", "xulrunner-dev" ]
+ old_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9-dev"]
+ new_versioned_deb_xulrunner_prereqs = ["xulrunner-1.9.1-dev"]
common_deb_prereqs = [ "build-essential", "dpkg-dev", "libreadline-dev", "libpcap-dev", "libpcre3-dev", "git-core", "scons", "debhelper", "devscripts", "git-core" ]
centos_preqres = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
- fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git" ]
+ fedora_prereqs = ["js-devel", "readline-devel", "pcre-devel", "gcc-c++", "scons", "rpm-build", "git", "curl" ]
def __init__(self, **kwargs):
super(ScriptFileConfigurator, self).__init__(**kwargs)
- if kwargs["mongo_version"][0] == 'r':
+ # FIXME: this method is disabled until we get back around to
+ # actually building from source.
+ if None: # kwargs["mongo_version"][0] == 'r':
self.get_mongo_commands = """
wget -Otarball.tgz "http://github.com/mongodb/mongo/tarball/{mongo_version}";
tar xzf tarball.tgz
@@ -522,48 +541,90 @@ git clone git://github.com/mongodb/mongo.git
(("centos", "*", "*"), self.rpm_productdir))),
("pkg_prereqs",
((("ubuntu", "9.4", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "9.10", "*"),
- self.new_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "10.4", "*"),
- self.new_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.new_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "8.10", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("ubuntu", "8.4", "*"),
- self.very_old_deb_prereqs + self.common_deb_prereqs),
+ self.unversioned_deb_boost_prereqs + self.old_versioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("debian", "5.0", "*"),
- self.old_deb_prereqs + self.common_deb_prereqs),
+ self.versioned_deb_boost_prereqs + self.unversioned_deb_xulrunner_prereqs + self.common_deb_prereqs),
(("fedora", "8", "*"),
self.fedora_prereqs),
(("centos", "5.4", "*"),
self.centos_preqres))),
+ # FIXME: this is deprecated
("commands",
((("debian", "*", "*"),
- self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
- (("ubuntu", "*", "*"),
+ self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
+ (("ubuntu", "10.4", "*"),
+ self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands + self.deb_build_commands),
+ (("ubuntu", "*", "*"),
self.preamble_commands + self.deb_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.deb_build_commands),
(("centos", "*", "*"),
self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands + self.rpm_build_commands),
(("fedora", "*", "*"),
self.preamble_commands + self.old_rpm_precommands + self.rpm_prereq_commands + self.get_mongo_commands + self.mangle_files_commands + self.rpm_build_commands))),
+ ("preamble_commands",
+ ((("*", "*", "*"), self.preamble_commands),
+ )),
+ ("install_prereqs",
+ ((("debian", "*", "*"), self.deb_prereq_commands),
+ (("ubuntu", "*", "*"), self.deb_prereq_commands),
+ (("centos", "*", "*"), self.rpm_prereq_commands),
+ (("fedora", "*", "*"), self.rpm_prereq_commands))),
+ ("get_mongo",
+ ((("*", "*", "*"), self.get_mongo_commands),
+ )),
+ ("mangle_mongo",
+ ((("debian", "*", "*"), self.mangle_files_commands),
+ (("ubuntu", "10.4", "*"),
+ self.mangle_files_commands + self.mangle_files_for_new_deb_xulrunner_commands),
+ (("ubuntu", "*", "*"), self.mangle_files_commands),
+ (("centos", "*", "*"),
+ self.mangle_files_commands + self.mangle_files_for_ancient_redhat_commands),
+ (("fedora", "*", "*"),
+ self.mangle_files_commands))),
+ ("build_prerequisites",
+ ((("fedora", "*", "*"), self.old_rpm_precommands),
+ (("centos", "*", "*"), self.old_rpm_precommands),
+ (("*", "*", "*"), ''))),
+ ("install_for_packaging",
+ ((("debian", "*", "*"),""),
+ (("ubuntu", "*", "*"),""),
+ (("fedora", "*", "*"), ""),
+ (("centos", "*", "*"),""))),
+ ("build_package",
+ ((("debian", "*", "*"),
+ self.deb_build_commands),
+ (("ubuntu", "*", "*"),
+ self.deb_build_commands),
+ (("fedora", "*", "*"),
+ self.rpm_build_commands),
+ (("centos", "*", "*"),
+ self.rpm_build_commands))),
("pkg_name",
((("debian", "*", "*"), "mongodb"),
(("ubuntu", "*", "*"), "mongodb"),
(("centos", "*", "*"), "mongo"),
-
- (("fedora", "*", "*"), "mongo")
- )),
+ (("fedora", "*", "*"), "mongo"))),
+ # FIXME: there should be a command-line argument for this.
("pkg_name_conflicts",
- ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot"]),
- ))
- ]
+ ((("*", "*", "*"), ["", "-stable", "-unstable", "-snapshot", "-oldstable"]),
+ )),
+ ]
class ScriptFile(object):
def __init__(self, configurator, **kwargs):
- self.mongo_version = kwargs["mongo_version"]
+ self.mongo_version = kwargs["mongo_version"] if kwargs['mongo_version'][0] != 'n' else 'HEAD'
+ self.mongo_pub_version = kwargs["mongo_version"].lstrip('n') if kwargs['mongo_version'][0] in 'n' else 'latest'
+ self.mongo_arch = kwargs["arch"] if kwargs["arch"] == "x86_64" else "i686"
self.pkg_version = kwargs["pkg_version"]
self.pkg_name_suffix = kwargs["pkg_name_suffix"] if "pkg_name_suffix" in kwargs else ""
self.pkg_prereqs = configurator.default("pkg_prereqs")
@@ -571,7 +632,8 @@ class ScriptFile(object):
self.pkg_product_dir = configurator.default("pkg_product_dir")
self.pkg_name_conflicts = configurator.default("pkg_name_conflicts") if self.pkg_name_suffix else []
self.pkg_name_conflicts.remove(self.pkg_name_suffix) if self.pkg_name_suffix and self.pkg_name_suffix in self.pkg_name_conflicts else []
- self.formatter = configurator.default("commands")
+ #self.formatter = configurator.default("commands")
+ self.formatter = configurator.default("preamble_commands") + configurator.default("install_prereqs") + configurator.default("get_mongo") + configurator.default("mangle_mongo") + (configurator.nightly_build_mangle_files if kwargs['mongo_version'][0] == 'n' else '') +(configurator.default("build_prerequisites") if kwargs['mongo_version'][0] != 'n' else '') + configurator.default("install_for_packaging") + configurator.default("build_package")
self.distro_name = configurator.default("distro_name")
self.distro_version = configurator.default("distro_version")
self.distro_arch = configurator.default("distro_arch")
@@ -591,8 +653,10 @@ class ScriptFile(object):
# comma-separated conflicts,
# but there's no reason to
# suppose this works elsewhere
- pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in self.pkg_name_conflicts])
- )
+ pkg_name_conflicts = ", ".join([self.pkg_name+conflict for conflict in self.pkg_name_conflicts]),
+ mongo_arch=self.mongo_arch,
+ mongo_pub_version=self.mongo_pub_version
+)
def __enter__(self):
self.localscript=None
@@ -632,7 +696,7 @@ def main():
for key in ["EC2_HOME", "JAVA_HOME"]:
if key in settings.makedist:
os.environ[key] = settings.makedist[key]
- for key in ["ec2_pkey", "ec2_cert", "ec2_sshkey", "ssh_keyfile" ]:
+ for key in ["ec2_pkey", "ec2_cert", "ec2_sshkey", "ssh_keyfile", "gpg_homedir" ]:
if key not in kwargs and key in settings.makedist:
kwargs[key] = settings.makedist[key]
except Exception, err:
@@ -681,7 +745,7 @@ def main():
kwargs["pkg_name_suffix"] = ""
- kwargs['local_gpg_dir'] = kwargs["local_gpg_dir"] if "local_gpg_dir" in kwargs else os.path.expanduser("~/.gnupg")
+ kwargs['gpg_homedir'] = kwargs["gpg_homedir"] if "gpg_homedir" in kwargs else os.path.expanduser("~/.gnupg")
configurator = Configurator(**kwargs)
LocalHost.runLocally(["mkdir", "-p", kwargs["localdir"]])
with ScriptFile(configurator, **kwargs) as script:
@@ -697,7 +761,7 @@ def main():
ssh.runRemotely(["mkdir", "pkg"])
if "local_mongo_dir" in kwargs:
ssh.sendFiles([(kwargs["local_mongo_dir"]+'/'+d, "pkg") for d in ["rpm", "debian"]])
- ssh.sendFiles([(kwargs['local_gpg_dir'], ".gnupg")])
+ ssh.sendFiles([(kwargs['gpg_homedir'], ".gnupg")])
ssh.sendFiles([(script.localscript, "makedist.sh")])
ssh.runRemotely((["sudo"] if ssh.ssh_login != "root" else [])+ ["sh", "makedist.sh"])
ssh.recvFiles([(script.pkg_product_dir, kwargs['localdir'])])
@@ -709,7 +773,7 @@ def processArguments():
("N", "no-terminate", False, "Leave the EC2 instance running at the end of the job", None),
("S", "subdirs", False, "Create subdirectories of the output directory based on distro name, version, and architecture", None),
("I", "use-internal-name", False, "Use the EC2 internal hostname for sshing", None),
- (None, "local-gpg-dir", True, "Local directory of gpg junk", "STRING"),
+ (None, "gpg-homedir", True, "Local directory of gpg junk", "STRING"),
(None, "local-mongo-dir", True, "Copy packaging files from local mongo checkout", "DIRECTORY"),
]
shortopts = "".join([t[0] + (":" if t[2] else "") for t in flagspec if t[0] is not None])
@@ -796,4 +860,4 @@ if __name__ == "__main__":
# Examples:
-# ./makedist.py --local-gpg-dir=$HOME/10gen/dst/dist-gnupg /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot
+# ./makedist.py /tmp/ubuntu ubuntu 8.10 x86_64 HEAD:-snapshot
diff --git a/buildscripts/mergerepositories.py b/buildscripts/mergerepositories.py
new file mode 100755
index 00000000000..7864d837ef8
--- /dev/null
+++ b/buildscripts/mergerepositories.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+
+from libcloud.types import Provider
+from libcloud.providers import get_driver
+from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
+from libcloud.base import Node, NodeImage, NodeSize, NodeState
+
+# libcloud's SSH client seems to be one of those pointless wrappers
+# that (at the moment) both doesn't add anything to the thing it wraps
+# (Paramiko) and also fails to expose the underlying thing's features.
+# What's wrong with people?
+#from libcloud.ssh import SSHClient
+
+import time
+import sys
+import settings
+import subprocess
+import os
+import socket
+
+EC2 = get_driver(Provider.EC2)
+EC2Driver=EC2NodeDriver(settings.id, settings.key)
+
+def tryEC2():
+
+ image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
+ size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
+
+ node = None
+ try:
+ node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, keyname="kp1", securitygroup=['default', 'dist-slave', 'buildbot-slave'])
+ print node
+ print node.id
+ while node.state == NodeState.PENDING:
+ time.sleep(3)
+ finally:
+ if node:
+ node.destroy()
+
+
+# I don't think libcloud's Nodes implement __enter__ and __exit__, and
+# I like the with statement for ensuring that we don't leak nodes when
+# we don't have to.
+class ubuntuNode(object):
+ def __init__(self):
+ image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
+ size=NodeSize('m1.large', 'large', None, None, None, None, EC2)
+
+ self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')
+
+ def initWait(self):
+ print "waiting for node to spin up"
+ # Wait for EC2 to tell us the node is running.
+ while 1:
+ ## XXX: it seems as if existing nodes' states don't get
+ ## updated, so we poll EC2 until we get a RUNNING node
+ ## with the desired id.
+
+ #EC2Driver.list_nodes()
+ #print self.node
+ #if self.node.state == NodeState.PENDING:
+ # time.sleep(10)
+ #else:
+ # break
+ n=[n for n in EC2Driver.list_nodes() if (n.id==self.node.id)][0]
+ if n.state == NodeState.PENDING:
+ time.sleep(10)
+ else:
+ self.node = n
+ break
+ print "ok"
+ # Now wait for the node's sshd to be accepting connections.
+ print "waiting for ssh"
+ sshwait = True
+ if sshwait == False:
+ return
+ while sshwait:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ try:
+ s.connect((self.node.public_ip[0], 22))
+ sshwait = False
+ print "connected on port 22 (ssh)"
+ time.sleep(15) # arbitrary timeout, in case the
+ # remote sshd is slow.
+ except socket.error, err:
+ pass
+ finally:
+ s.close()
+ time.sleep(3) # arbitrary timeout
+ print "ok"
+
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, arg0, arg1, arg2):
+ print "shutting down node %s" % self.node
+ self.node.destroy()
+
+
+def tryRackSpace():
+ driver=get_driver(Provider.RACKSPACE)
+ conn = driver('tengen', '7d67202d37af58a7adb32cb1626452c4')
+ string='Fedora 11'
+ images=filter(lambda x: (x.name.find(string) > -1), conn.list_images())
+ sizes=conn.list_sizes()
+ sizes.sort(cmp=lambda x,y: int(x.ram)<int(y.ram))
+ node = None
+ if len(images) != 1:
+ raise "too many images with \"%s\" in the name" % string
+ try:
+ image = images[0]
+ node = conn.create_node(image=image, name=string, size=sizes[0])
+ print node
+ print node.extras['password']
+ while node.state == NodeState.PENDING:
+ time.sleep(10)
+ finally:
+ if node:
+ node.destroy()
+
+class Err(Exception):
+ pass
+def run_for_effect(argv):
+ print " ".join(argv)
+ r=subprocess.Popen(argv).wait()
+ if r!=0:
+ raise Err("subprocess %s exited %d" % (argv, r))
+
+if __name__ == "__main__":
+ (dir, outdir) = sys.argv[-2:]
+ dirtail=dir.rstrip('\/').split('/')[-1]
+
+ gpgdir=settings.makedist['gpg_homedir']
+ keyfile=settings.makedist['ssh_keyfile']
+
+ makeaptrepo="""for x in debian ubuntu; do (cd $x; for d in `find . -name *.deb | sed 's|^./||; s|/[^/]*$||' | sort -u`; do dpkg-scanpackages $d > $d/Packages; gzip -9c $d/Packages > $d/Packages.gz; done) ; done"""
+ makereleaseprologue="""Origin: 10gen
+Label: 10gen
+Suite: 10gen
+Codename: VVVVVV
+Version: VVVVVV
+Architectures: i386 amd64
+Components: 10gen
+Description: 10gen packages"""
+ makeaptrelease="""find . -maxdepth 3 -mindepth 3 | while read d; do ( cd $d && (echo '%s' | sed s/VVVVVV/$(basename $(pwd))/; apt-ftparchive release .) > /tmp/Release && mv /tmp/Release . && gpg -r `gpg --list-keys | grep uid | awk '{print $(NF)}'` --no-secmem-warning --no-tty -abs --output Release.gpg Release ); done""" % makereleaseprologue
+ with ubuntuNode() as ubuntu:
+ ubuntu.initWait()
+ print ubuntu.node
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sudo", "sh", "-c", "\"export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -y install debhelper\""])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "ubuntu@"+ubuntu.node.public_ip[0]+":"])
+ run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", gpgdir, "ubuntu@"+ubuntu.node.public_ip[0]+":.gnupg"])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sh", "-c", "\"ls -lR ./" + dirtail + "\""])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrepo])
+ run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrelease])
+ run_for_effect(["scp", "-i", keyfile, "-r", "ubuntu@"+ubuntu.node.public_ip[0]+":./"+dirtail +'/*', outdir])
+
+ # TODO: yum repositories
+
+
+ #main()
+ #tryRackSpace()
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index b12174cfafb..581a8db4324 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -426,7 +426,7 @@ namespace mongo {
auto_ptr<DBClientCursor> c =
this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
- massert( 10276 , "DBClientBase::findOne: transport error", c.get() );
+ uassert( 10276 , "DBClientBase::findOne: transport error", c.get() );
if ( !c->more() )
return BSONObj();
@@ -439,25 +439,13 @@ namespace mongo {
string ip;
int port;
- size_t idx = serverAddress.find( ":" );
+ size_t idx = serverAddress.rfind( ":" );
if ( idx != string::npos ) {
port = strtol( serverAddress.substr( idx + 1 ).c_str(), 0, 10 );
ip = serverAddress.substr( 0 , idx );
- ip = hostbyname(ip.c_str());
} else {
port = CmdLine::DefaultDBPort;
- if (serverAddress.find( "/" ) == string::npos){
- ip = hostbyname( serverAddress.c_str() );
- } else {
- ip = serverAddress;
- }
- }
- if( ip.empty() ) {
- stringstream ss;
- ss << "client connect: couldn't parse/resolve hostname: " << _serverAddress;
- errmsg = ss.str();
- failed = true;
- return false;
+ ip = serverAddress;
}
// we keep around SockAddr for connection life -- maybe MessagingPort
@@ -465,24 +453,14 @@ namespace mongo {
server = auto_ptr<SockAddr>(new SockAddr(ip.c_str(), port));
p = auto_ptr<MessagingPort>(new MessagingPort());
-#if 0
- //Right now some code depends on ports to identify a connection.
- //Using unix sockets breaks this code
-
-#ifndef _WIN32
- if (server->getAddr() == "127.0.0.1"){
- SockAddr _server (makeUnixSockPath(port).c_str(), port);
- if (p->connect(_server)){
- *server = _server;
- return true;
- }
+ if (server->getAddr() == "0.0.0.0"){
+ failed = true;
+ return false;
}
-#endif
-#endif
if ( !p->connect(*server) ) {
stringstream ss;
- ss << "couldn't connect to server " << serverAddress << " " << ip << ":" << port;
+ ss << "couldn't connect to server {ip: \"" << ip << "\", port: " << port << '}';
errmsg = ss.str();
failed = true;
return false;
@@ -741,7 +719,7 @@ namespace mongo {
if ( !port().call(toSend, response) ) {
failed = true;
if ( assertOk )
- massert( 10278 , "dbclient error communicating with server", false);
+ uassert( 10278 , "dbclient error communicating with server", false);
return false;
}
}
diff --git a/client/dbclient.h b/client/dbclient.h
index ebd3b7379de..be344719a89 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -769,6 +769,7 @@ namespace mongo {
false was returned -- it will try to connect again.
@param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ If you use IPv6 you must add a port number ( ::1:27017 )
@param errmsg any relevant error message will appended to the string
@return false if fails to connect.
*/
diff --git a/db/client.cpp b/db/client.cpp
index d8114c2c291..dbfcbd02aae 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -275,4 +275,21 @@ namespace mongo {
} handshakeCmd;
+
+ int Client::recommendedYieldMicros(){
+ int num = 0;
+ {
+ scoped_lock bl(clientsMutex);
+ num = clients.size();
+ }
+
+ if ( --num <= 0 ) // -- is for myself
+ return 0;
+
+ if ( num > 50 )
+ num = 50;
+
+ num *= 100;
+ return num;
+ }
}
diff --git a/db/client.h b/db/client.h
index 022c530fd12..96dbdc085c6 100644
--- a/db/client.h
+++ b/db/client.h
@@ -45,6 +45,8 @@ namespace mongo {
static mongo::mutex clientsMutex;
static set<Client*> clients; // always be in clientsMutex when manipulating this
+ static int recommendedYieldMicros();
+
class GodScope {
bool _prev;
public:
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index 3a1c7391640..5b40cf29a14 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -233,6 +233,7 @@ namespace mongo {
{
dbtempreleasecond unlock;
+ sleepmicros( Client::recommendedYieldMicros() );
}
if ( ClientCursor::find( id , false ) == 0 ){
diff --git a/db/db.cpp b/db/db.cpp
index f210e3f1b16..a913c84411a 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -351,7 +351,7 @@ namespace mongo {
Client::GodScope gs;
log(1) << "enter repairDatabases" << endl;
- assert(checkNsFilesOnLoad);
+// assert(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
dblock lk;
diff --git a/db/db.vcproj b/db/db.vcproj
index 3ea7506c802..ecab6a7b7af 100644
--- a/db/db.vcproj
+++ b/db/db.vcproj
@@ -1587,6 +1587,10 @@
>
</File>
<File
+ RelativePath="..\util\ramstore.cpp"
+ >
+ </File>
+ <File
RelativePath=".\repl.cpp"
>
</File>
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index 7fd36d24a5d..e6d79f521a4 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -746,13 +746,28 @@ namespace mongo {
// todo: can be a little faster if we don't use toString() here.
bool BSONObj::valid() const {
- try {
- toString();
- }
- catch (...) {
- return false;
+ try{
+ BSONObjIterator it(*this);
+ while( it.moreWithEOO() ){
+ // both throw exception on failure
+ BSONElement e = it.next(true);
+ e.validate();
+
+ if (e.eoo()){
+ if (it.moreWithEOO())
+ return false;
+ return true;
+ }else if (e.isABSONObj()){
+ if(!e.embeddedObject().valid())
+ return false;
+ }else if (e.type() == CodeWScope){
+ if(!e.codeWScopeObject().valid())
+ return false;
+ }
+ }
+ } catch (...) {
}
- return true;
+ return false;
}
/* well ordered compare */
diff --git a/db/namespace.cpp b/db/namespace.cpp
index ea34e966eb5..1e75f6d78ea 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -187,15 +187,16 @@ namespace mongo {
if ( capped == 0 ) {
if ( left < 24 || left < (lenToAlloc >> 3) ) {
// you get the whole thing.
+ DataFileMgr::grow(loc, regionlen);
return loc;
}
}
/* split off some for further use. */
r->lengthWithHeaders = lenToAlloc;
+ DataFileMgr::grow(loc, lenToAlloc);
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
- /* TODOMMF split */
DeletedRecord *newDel = DataFileMgr::makeDeletedRecord(newDelLoc, left);
newDel->extentOfs = r->extentOfs;
newDel->lengthWithHeaders = left;
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index b3c5aa6eaac..470b00b249c 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -452,6 +452,7 @@ namespace mongo {
/*---------------------------------------------------------------------*/
DiskLoc Extent::reuse(const char *nsname) {
+ /*TODOMMF - work to do when extent is freed. */
log(3) << "reset extent was:" << nsDiagnostic.buf << " now:" << nsname << '\n';
massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
xnext.Null();
@@ -465,9 +466,10 @@ namespace mongo {
int delRecLength = length - (_extentData - (char *) this);
//DeletedRecord *empty1 = (DeletedRecord *) extentData;
- DeletedRecord *empty = (DeletedRecord *) getRecord(emptyLoc);
+ DeletedRecord *empty = DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength);//(DeletedRecord *) getRecord(emptyLoc);
//assert( empty == empty1 );
- memset(empty, delRecLength, 1);
+
+ // do we want to zero the record? memset(empty, ...)
empty->lengthWithHeaders = delRecLength;
empty->extentOfs = myLoc.getOfs();
diff --git a/db/pdfile.h b/db/pdfile.h
index c074b8c36ab..065f05b5be1 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -83,6 +83,7 @@ namespace mongo {
Extent* _getExtent(DiskLoc loc);
Record* recordAt(DiskLoc dl);
Record* makeRecord(DiskLoc dl, int size);
+ void grow(DiskLoc dl, int size);
MMF mmf;
MMF::Pointer _p;
@@ -121,6 +122,7 @@ namespace mongo {
static Extent* getExtent(const DiskLoc& dl);
static Record* getRecord(const DiskLoc& dl);
static DeletedRecord* makeDeletedRecord(const DiskLoc& dl, int len);
+ static void grow(const DiskLoc& dl, int len);
/* does not clean up indexes, etc. : just deletes the record in the pdfile. */
void _deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl);
@@ -331,6 +333,11 @@ namespace mongo {
return (Record*) _p.at(ofs, -1);
}
+ inline void MongoDataFile::grow(DiskLoc dl, int size) {
+ int ofs = dl.getOfs();
+ _p.grow(ofs, size);
+ }
+
inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
int ofs = dl.getOfs();
assert( ofs >= DataFileHeader::HeaderSize );
@@ -456,9 +463,16 @@ namespace mongo {
return cc().database()->getFile(dl.a())->recordAt(dl);
}
+ BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
+
+ inline void DataFileMgr::grow(const DiskLoc& dl, int len) {
+ assert( dl.a() != -1 );
+ cc().database()->getFile(dl.a())->grow(dl, len);
+ }
+
inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
assert( dl.a() != -1 );
- return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, len);
+ return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
}
void ensureHaveIdIndex(const char *ns);
diff --git a/dbtests/socktests.cpp b/dbtests/socktests.cpp
index c263f2e0a87..4a7e9b8997d 100644
--- a/dbtests/socktests.cpp
+++ b/dbtests/socktests.cpp
@@ -29,6 +29,7 @@ namespace SockTests {
void run() {
ASSERT_EQUALS( "127.0.0.1", hostbyname( "localhost" ) );
ASSERT_EQUALS( "127.0.0.1", hostbyname( "127.0.0.1" ) );
+ ASSERT_EQUALS( "::1", hostbyname( "::1" ) );
}
};
diff --git a/dbtests/test.vcproj b/dbtests/test.vcproj
index 002d4646662..5cff466dfb6 100644
--- a/dbtests/test.vcproj
+++ b/dbtests/test.vcproj
@@ -1563,6 +1563,10 @@
>
</File>
<File
+ RelativePath="..\util\ramstore.cpp"
+ >
+ </File>
+ <File
RelativePath="..\db\repl.cpp"
>
</File>
diff --git a/jstests/pullall2.js b/jstests/pullall2.js
new file mode 100644
index 00000000000..61369badaa4
--- /dev/null
+++ b/jstests/pullall2.js
@@ -0,0 +1,20 @@
+
+t = db.pullall2
+t.drop()
+
+o = { _id : 1 , a : [] }
+for ( i=0; i<5; i++ )
+ o.a.push( { x : i , y : i } )
+
+t.insert( o )
+
+assert.eq( o , t.findOne() , "A" );
+
+t.update( {} , { $pull : { a : { x : 3 } } } )
+o.a = o.a.filter( function(z){ return z.x != 3 } )
+assert.eq( o , t.findOne() , "B" );
+
+t.update( {} , { $pull : { a : { x : { $in : [ 1 , 4 ] } } } } );
+o.a = o.a.filter( function(z){ return z.x != 1 } )
+o.a = o.a.filter( function(z){ return z.x != 4 } )
+assert.eq( o , t.findOne() , "C" );
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
index 150731e6e6b..d8d268dc058 100644
--- a/jstests/repl/snapshot3.js
+++ b/jstests/repl/snapshot3.js
@@ -48,4 +48,6 @@ rp.master().getDB( baseName )[ baseName ].save( {i:500} );
assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
+assert( !rawMongoProgramOutput().match( /SyncException/ ) );
+
+print("snapshot3.js finishes");
diff --git a/s/config.cpp b/s/config.cpp
index c3c3668c5ab..3eff56f4ec0 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -551,7 +551,7 @@ namespace mongo {
a << "abc.foo" << fromjson( "{ 'key' : { 'a' : 1 } , 'unique' : false }" );
a << "abc.bar" << fromjson( "{ 'key' : { 'kb' : -1 } , 'unique' : true }" );
- b.appendArray( "sharded" , a.obj() );
+ b.append( "sharded" , a.obj() );
DBConfig c;
testInOut( c , b.obj() );
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 5324f0036d5..e742a0910e8 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -173,8 +173,8 @@ string fixHost( string url , string host , string port ){
if ( url.find( "." ) != string::npos )
return url + "/test";
- if ( url.find( ":" ) != string::npos &&
- isdigit( url[url.find(":")+1] ) )
+ if ( url.rfind( ":" ) != string::npos &&
+ isdigit( url[url.rfind(":")+1] ) )
return url + "/test";
}
return url;
@@ -191,6 +191,10 @@ string fixHost( string url , string host , string port ){
string newurl = host;
if ( port.size() > 0 )
newurl += ":" + port;
+ else if (host.find(':') != string::npos){
+ // need to add port with IPv6 addresses
+ newurl += ":27017";
+ }
newurl += "/" + url;
diff --git a/util/goodies.h b/util/goodies.h
index f6a1f4c55d2..5b6c834480d 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -191,6 +191,8 @@ namespace mongo {
boost::thread::sleep(xt);
}
inline void sleepmicros(int s) {
+ if ( s <= 0 )
+ return;
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += ( s / 1000000 );
@@ -211,6 +213,8 @@ namespace mongo {
}
}
inline void sleepmicros(int s) {
+ if ( s <= 0 )
+ return;
struct timespec t;
t.tv_sec = (int)(s / 1000000);
t.tv_nsec = 1000 * ( s % 1000000 );
diff --git a/util/message.cpp b/util/message.cpp
index acc862fd2b6..948284764eb 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -82,7 +82,7 @@ namespace mongo {
/* listener ------------------------------------------------------------------- */
void Listener::initAndListen() {
- vector<SockAddr> mine = ipToAddrs(ip.c_str(), port);
+ vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _port);
vector<int> socks;
int maxfd = 0; // needed for select()
@@ -156,7 +156,7 @@ namespace mongo {
if ( s < 0 ) {
int x = errno; // so no global issues
if ( x == ECONNABORTED || x == EBADF ) {
- log() << "Listener on port " << port << " aborted" << endl;
+ log() << "Listener on port " << _port << " aborted" << endl;
return;
} if ( x == 0 && inShutdown() ){
return; // socket closed
@@ -166,7 +166,8 @@ namespace mongo {
}
if (from.getType() != AF_UNIX)
disableNagle(s);
- if ( ! cmdLine.quiet ) log() << "connection accepted from " << from.toString() << " #" << ++connNumber << endl;
+ if ( _logConnect && ! cmdLine.quiet )
+ log() << "connection accepted from " << from.toString() << " #" << ++connNumber << endl;
accepted(s, from);
}
}
@@ -382,6 +383,7 @@ again:
int z = (len+1023)&0xfffffc00;
assert(z>=len);
MsgData *md = (MsgData *) malloc(z);
+ assert(md);
md->len = len;
if ( len <= 0 ) {
diff --git a/util/message.h b/util/message.h
index 02695f253f7..8175aba30d1 100644
--- a/util/message.h
+++ b/util/message.h
@@ -31,7 +31,7 @@ namespace mongo {
class Listener {
public:
- Listener(const string &_ip, int p) : ip(_ip), port(p) { }
+ Listener(const string &ip, int p, bool logConnect=true ) : _ip(ip), _port(p), _logConnect(logConnect) { }
virtual ~Listener() {}
void initAndListen(); // never returns unless error (start a thread)
@@ -42,8 +42,9 @@ namespace mongo {
}
private:
- string ip;
- int port;
+ string _ip;
+ int _port;
+ bool _logConnect;
};
class AbstractMessagingPort {
diff --git a/util/miniwebserver.cpp b/util/miniwebserver.cpp
index 67e32d77549..89ca7b0756b 100644
--- a/util/miniwebserver.cpp
+++ b/util/miniwebserver.cpp
@@ -24,7 +24,7 @@
namespace mongo {
MiniWebServer::MiniWebServer(const string &ip, int port)
- : Listener(ip, port)
+ : Listener(ip, port, false)
{}
string MiniWebServer::parseURL( const char * buf ) {
diff --git a/util/mmap.h b/util/mmap.h
index 20edaf9a7a0..1cde87f5039 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -53,7 +53,8 @@ namespace mongo {
class Pointer {
public:
- void* at(int offset, int maxLen);
+ void* at(int offset, int len);
+ void grow(int offset, int len);
bool isNull() const;
};
@@ -69,6 +70,7 @@ namespace mongo {
Pointer() : _base(0) { }
Pointer(void *p) : _base((char*) p) { }
void* at(int offset, int maxLen) { return _base + offset; }
+ void grow(int offset, int len) { /* no action required with mem mapped file */ }
bool isNull() const { return _base == 0; }
};
@@ -110,9 +112,13 @@ namespace mongo {
void printMemInfo( const char * where );
-//#include "ramstore.h"
-// typedef RamStoreFile MMF;
- typedef MemoryMappedFile MMF;
+#include "ramstore.h"
+//#define _RAMSTORE
+#if defined(_RAMSTORE)
+ typedef RamStoreFile MMF;
+#else
+ typedef MemoryMappedFile MMF;
+#endif
} // namespace mongo
diff --git a/util/ramstore.cpp b/util/ramstore.cpp
new file mode 100644
index 00000000000..031508289d7
--- /dev/null
+++ b/util/ramstore.cpp
@@ -0,0 +1,93 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "stdafx.h"
+#include "mmap.h"
+
+namespace mongo {
+
+ //extern bool checkNsFilesOnLoad;
+
+static set<RamStoreFile*> files;
+
+void RamStoreFile::grow(int offset, int len) {
+ cout << "GROW ofs:" << offset << " len:" << len;
+ assert( len > 0 );
+ Node& n = _m[offset];
+ cout << " oldlen:" << n.len << endl;
+ assert( n.len > 0 );
+ if( len > n.len ) {
+ n.p = (char *) realloc(n.p, len);
+ memset(((char *)n.p) + n.len, 0, len - n.len);
+ n.len = len;
+ }
+}
+
+/* maxLen can be -1 for existing data */
+void* RamStoreFile::at(int offset, int maxLen) {
+ if( offset != _last ) {
+ if( _m.count(_last) ) {
+ _m[_last].check();
+ if( !(offset < _last || offset >= _last + _m[_last].len) ) {
+ cout << offset << ' ' << _last << ' ' << _m[_last].len << endl;
+ assert(false);
+ }
+ }
+ }
+ _last = offset;
+
+ Node& n = _m[offset];
+ if( n.len == 0 ) {
+ // create
+ if( strstr(name, ".ns") == 0 )
+ cout << "CREATE " << name << " ofs:" << offset << " len:" << maxLen << endl;
+ assert( maxLen >= 0 );
+ n.p = (char *) calloc(maxLen+1, 1);
+ n.len = maxLen;
+ }
+ assert( n.len >= maxLen );
+ n.check();
+ return n.p;
+ }
+
+void RamStoreFile::Node::check() {
+ assert( p[len] == 0 );
+}
+
+void RamStoreFile::check() {
+ for( std::map<int,Node>::iterator i = _m.begin(); i != _m.end(); i++ ) {
+ i->second.check();
+ }
+}
+
+void RamStoreFile::validate() {
+ for( set<RamStoreFile*>::iterator i = files.begin(); i != files.end(); i++ ) {
+ (*i)->check();
+ }
+}
+
+RamStoreFile::~RamStoreFile() {
+ check();
+ files.erase(this);
+}
+
+RamStoreFile::RamStoreFile() : _len(0) {
+ // checkNsFilesOnLoad = false;
+ files.insert(this);
+}
+
+}
+
diff --git a/util/ramstore.h b/util/ramstore.h
index 183f91f9310..f75a57a5a69 100644
--- a/util/ramstore.h
+++ b/util/ramstore.h
@@ -17,29 +17,28 @@
* limitations under the License.
*/
+extern bool checkNsFilesOnLoad;
+
class RamStoreFile : public MongoFile {
char name[256];
struct Node {
- void *p;
+ char *p;
int len;
Node() : len(0) { }
+ void check();
};
- map<int,Node> _m;
+ std::map<int,Node> _m;
long _len;
+ static void validate();
+ void check();
+
+ int _last;
+
+ void grow(int offset, int len);
+
/* maxLen can be -1 for existing data */
- void* at(int offset, int maxLen) {
- Node& n = _m[offset];
- if( n.len == 0 ) {
- // create
- cout << "CREATE ofs:" << offset << " len:" << maxLen << endl;
- assert( maxLen >= 0 );
- n.p = calloc(maxLen+1, 1);
- n.len = maxLen;
- }
- assert( n.len >= maxLen );
- return n.p;
- }
+ void* at(int offset, int maxLen);
protected:
virtual void close() {
@@ -51,7 +50,8 @@ protected:
virtual void flush(bool sync) { }
public:
- RamStoreFile() : _len(0) { }
+ ~RamStoreFile();
+ RamStoreFile();
virtual long length() { return _len; }
@@ -59,10 +59,14 @@ public:
RamStoreFile* _f;
friend class RamStoreFile;
public:
- void* at(int offset, int maxLen) {
- assert( maxLen <= /*MaxBSONObjectSize*/4*1024*1024 + 128 );
- return _f->at(offset,maxLen);
+ void* at(int offset, int len) {
+ assert( len <= /*MaxBSONObjectSize*/4*1024*1024 + 128 );
+ return _f->at(offset,len);
}
+ void grow(int offset, int len) {
+ assert( len <= /*MaxBSONObjectSize*/4*1024*1024 + 128 );
+ _f->grow(offset,len);
+ }
bool isNull() const { return _f == 0; }
};
diff --git a/util/sock.cpp b/util/sock.cpp
index f10c8d8e2f6..f6659809536 100644
--- a/util/sock.cpp
+++ b/util/sock.cpp
@@ -31,6 +31,9 @@ namespace mongo {
}
SockAddr::SockAddr(const char * iporhost , int port) {
+ if (!strcmp(iporhost, "localhost"))
+ iporhost = "127.0.0.1";
+
if (strchr(iporhost, '/')){
#ifdef _WIN32
uassert(13080, "no unix socket support on windows", false);
@@ -39,22 +42,26 @@ namespace mongo {
as<sockaddr_un>().sun_family = AF_UNIX;
strcpy(as<sockaddr_un>().sun_path, iporhost);
addressSize = sizeof(sockaddr_un);
- }else if (strchr(iporhost, ':')){
- as<sockaddr_in6>().sin6_family = AF_INET6;
- as<sockaddr_in6>().sin6_port = htons(port);
-#ifdef _WIN32
- uassert(13081, "No IPv6 support on windows", false);
-#else
- inet_pton(AF_INET6, iporhost, &as<sockaddr_in6>().sin6_addr);
-#endif
- addressSize = sizeof(sockaddr_in6);
- } else {
- string ip = hostbyname( iporhost );
- memset(as<sockaddr_in>().sin_zero, 0, sizeof(as<sockaddr_in>().sin_zero));
- as<sockaddr_in>().sin_family = AF_INET;
- as<sockaddr_in>().sin_port = htons(port);
- as<sockaddr_in>().sin_addr.s_addr = inet_addr(ip.c_str());
- addressSize = sizeof(sockaddr_in);
+ }else{
+ addrinfo* addrs = NULL;
+ addrinfo hints;
+ memset(&hints, 0, sizeof(addrinfo));
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_ADDRCONFIG;
+
+ stringstream ss;
+ ss << port;
+ int ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
+ if (ret){
+ log() << "getaddrinfo(\"" << iporhost << "\") failed: " << gai_strerror(ret) << endl;
+ *this = SockAddr(port);
+ }else{
+ //TODO: handle other addresses in linked list;
+ assert(addrs->ai_addrlen <= sizeof(sa));
+ memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
+ addressSize = addrs->ai_addrlen;
+ freeaddrinfo(addrs);
+ }
}
}
@@ -70,23 +77,11 @@ namespace mongo {
}
string hostbyname(const char *hostname) {
- static string unknown = "0.0.0.0";
- if ( unknown == hostname )
- return unknown;
-
- scoped_lock lk(sock_mutex);
-#if defined(_WIN32)
- if( inet_addr(hostname) != INADDR_NONE )
- return hostname;
-#else
- struct in_addr temp;
- if ( inet_aton( hostname, &temp ) )
- return hostname;
-#endif
- struct hostent *h;
- h = gethostbyname(hostname);
- if ( h == 0 ) return "";
- return inet_ntoa( *((struct in_addr *)(h->h_addr)) );
+ string addr = SockAddr(hostname, 0).getAddr();
+ if (addr == "0.0.0.0")
+ return "";
+ else
+ return addr;
}
class UDPConnection {
diff --git a/util/sock.h b/util/sock.h
index 0a51c04daaa..99d5f8868e2 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -35,6 +35,9 @@ namespace mongo {
inline int getLastError() {
return WSAGetLastError();
}
+ inline const char* gai_strerror(int code) {
+ return ::gai_strerrorA(code);
+ }
inline void disableNagle(int sock) {
int x = 1;
if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
@@ -154,20 +157,17 @@ namespace mongo {
}
string getAddr() const {
- const int buflen=128;
-#if !defined(_WIN32)
- char buffer[buflen];
-#endif
-
switch (getType()){
-#ifdef _WIN32
- case AF_INET: return inet_ntoa(as<sockaddr_in>().sin_addr);
- case AF_INET6: return "No IPv6 support on windows";
-#else
- case AF_INET: return inet_ntop(getType(), &as<sockaddr_in>().sin_addr, buffer, addressSize);
- case AF_INET6: return inet_ntop(getType(), &as<sockaddr_in6>().sin6_addr, buffer, addressSize);
-#endif
- case AF_UNIX: return (addressSize > 2 ?as<sockaddr_un>().sun_path : "anonymous unix socket");
+ case AF_INET:
+ case AF_INET6: {
+ const int buflen=128;
+ char buffer[buflen];
+ int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
+ massert(13082, gai_strerror(ret), ret == 0);
+ return buffer;
+ }
+
+ case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
case AF_UNSPEC: return "(NONE)";
default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
}