summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Kreuter <richard@10gen.com>2010-04-07 17:08:59 -0400
committerRichard Kreuter <richard@10gen.com>2010-04-07 17:09:37 -0400
commit41394fd4b393587ea0bae28adde6ae04abf5b2d8 (patch)
tree5636cdeabc1e669897cf2e97033d2885de6bf0bc
parent9c932e1c3b3227a8fe045e94069b597c8bb12f46 (diff)
downloadmongo-41394fd4b393587ea0bae28adde6ae04abf5b2d8.tar.gz
Two new scripts for automating pkg repository construction.
-rwxr-xr-xbuildscripts/makealldists.py263
-rwxr-xr-xbuildscripts/mergerepositories.py163
2 files changed, 426 insertions, 0 deletions
diff --git a/buildscripts/makealldists.py b/buildscripts/makealldists.py
new file mode 100755
index 00000000000..0a3c5207a08
--- /dev/null
+++ b/buildscripts/makealldists.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+import subprocess
+import sys
+import os
+import time
+import tempfile
+import errno
+import glob
+import shutil
+import settings
+import simples3
+
def s3bucket():
    """Open the distribution S3 bucket using the credentials in settings."""
    bucket_name = settings.bucket
    return simples3.S3Bucket(bucket_name, settings.id, settings.key)
+
def s3cp (bucket, filename, s3name):
    """Upload local file *filename* to *bucket* under key *s3name*, world-readable.

    The whole file is read into memory, so this is only suitable for
    modestly sized artifacts (packages and repository index files).
    """
    defaultacl="public-read"
    # Use a context manager so the file handle is closed promptly even if
    # the upload raises (the original leaked the handle until GC).
    with open(filename, "rb") as fh:
        bucket.put(s3name, fh.read(), acl=defaultacl)
+
def pushrepo(repodir):
    """Upload a built apt repository tree to S3, then remove it locally.

    Every Packages*, Release*, and *.deb file under *repodir* is uploaded
    twice: once under a dated distros-archive/YYYYMMDD/ prefix and once
    under the live distros/ prefix.  Any *.deb that was already live under
    distros/ before this push is then deleted from the bucket.
    """
    files=subprocess.Popen(['find', repodir, '-name', 'Packages*', '-o', '-name', '*.deb', '-o', '-name', 'Release*'], stdout=subprocess.PIPE).communicate()[0][:-1].split('\n')
    bucket=s3bucket()
    # Snapshot the .debs that were live *before* this push so we can retire them.
    olddebs=[t[0] for t in bucket.listdir(prefix='distros/') if t[0].endswith('.deb')]
    for fn in files:
        tail = fn[len(repodir):]
        # Note: be very careful not to produce s3names containing
        # sequences of repeated slashes: s3 doesn't treat a////b as
        # equivalent to a/b.
        s3name='distros-archive/'+time.strftime('%Y%m%d')+tail
        s3cp(bucket, fn, s3name)
        s3name='distros'+tail
        s3cp(bucket, fn, s3name)
    # BUG FIX: the old-package deletion used to sit *inside* the loop above,
    # so it ran once per uploaded file.  Delete each retired .deb exactly once.
    for olddeb in olddebs:
        bucket.delete(olddeb)
    # BUG FIX: the original also rmtree'd `outputroot` and `mergedir` here,
    # names that are not defined in this scope (guaranteed NameError).  Those
    # temp dirs belong to the caller; only remove the tree we were given.
    shutil.rmtree(repodir)
+
def cat (inh, outh):
    """Rewind *inh*, copy its entire contents into *outh*, then close *inh*."""
    inh.seek(0)
    outh.write(inh.read())
    inh.close()
+
+# This generates all tuples from mixed-radix counting system, essentially.
def gen(listlist):
    """Yield every combination of one element per input list.

    Works like a mixed-radix odometer whose *first* axis turns fastest,
    so elements of the first input list cycle quickest in the output.
    """
    radices = [len(axis) for axis in listlist]
    digits = [0] * len(listlist)
    while True:
        yield [axis[d] for (axis, d) in zip(listlist, digits)]
        pos = 0
        digits[pos] += 1
        # Propagate carries; exhaust the generator when the last digit rolls over.
        while digits[pos] == radices[pos]:
            digits[pos] = 0
            pos += 1
            if pos == len(digits):
                return
            digits[pos] += 1
+
def dirify(string):
    """Return *string* guaranteed to end in a slash.

    No-op when it already ends in '/' or '\\' -- and, as a quirk of the
    `in` test, also a no-op on the empty string.
    """
    if string[-1:] in '\\/':
        return string
    return string + '/'
def fileify(string):
    """Return *string* with any trailing '/' or '\\' characters stripped."""
    if string[-1:] in '\\/':
        return string.rstrip('\\/')
    return string
+
+# WTF: os.makedirs errors if the leaf exists?
def makedirs(f):
    """Like os.makedirs, but succeed silently when the leaf already exists.

    os.makedirs raises OSError/EEXIST if the target is already present;
    that case is swallowed here, and any other error is re-raised.
    """
    try:
        os.makedirs(f)
    except OSError as exc:  # Python >2.5
        if exc.errno != errno.EEXIST:
            raise exc
+
+
+
+# This is a fairly peculiar thing to want to do, but our build process
+# creates several apt repositories for each mongo version we build on
# any given Debian/Ubuntu release. To merge repositories together, we
+# must concatenate the Packages.gz files.
def merge_directories_concatenating_conflicts (target, sources):
    """Merge every directory tree in *sources* into *target*.

    Files are appended (mode "a"), never overwritten, so when the same
    relative path occurs in several sources the contents are concatenated.
    That is deliberate: concatenated gzip members (Packages.gz) are still a
    valid gzip stream, which is what apt expects for a merged repository.
    """
    # BUG FIX: removed a leftover debug `print sources` that polluted stdout.
    target = dirify(target)
    for source in sources:
        source = dirify(source)
        files = subprocess.Popen(["find", source, "-type", "f"], stdout=subprocess.PIPE).communicate()[0].split('\n')
        for f in files:
            # find's output ends with a blank line; skip it.
            if f == '':
                continue
            rel = f[len(source):]
            makedirs(os.path.dirname(target+rel))
            with open(f) as inh:
                with open(target+rel, "a") as outh:
                    outh.write(inh.read())
+
+
def parse_mongo_version_spec(spec):
    """Split a "version[:suffix[:pkg_version]]" spec on ':'.

    The result is padded with empty strings so at least three fields come
    back; extra colon-separated fields, if any, are passed through as-is.
    """
    fields = spec.split(':')
    while len(fields) < 3:
        fields.append('')
    return fields
+
def logfh(distro, distro_version, arch, mongo_version):
    """Open a fresh log file for one makedist run.

    A *named* temporary file (rather than an anonymous one) mostly so the
    logs can be tail(1)'d while builds are in flight.
    """
    log_prefix = "%s-%s-%s-%s.log." % (distro, distro_version, arch, mongo_version)
    return tempfile.NamedTemporaryFile("w+b", -1, prefix=log_prefix)
+
def spawn(distro, distro_version, arch, spec, directory, opts):
    """Start one makedist.py build asynchronously.

    Returns (Popen, log filehandle, distro, distro_version, arch, spec) so
    wait() can later reap the child and attribute its log.
    """
    (mongo_version, suffix, pkg_version) = parse_mongo_version_spec(spec)
    argv = ["makedist.py"] + opts + [ directory, distro, distro_version, arch ] + [ spec ]
    fh = logfh(distro, distro_version, arch, mongo_version)
    fh.write("Running %s\n" % (argv,))
    # it's often handy to be able to run these things at the shell
    # manually.  FIXME: this ought to be slightly less than thoroughly
    # ignorant of quoting issues (as it is now).
    fh.write(" ".join(argv) + "\n")
    fh.flush()
    proc = subprocess.Popen(argv, stdin=None, stdout=fh, stderr=fh)
    return (proc, fh, distro, distro_version, arch, spec)
+
def win(name, logfh, winfh):
    """Record a successful build: copy its whole log into *winfh*,
    bracketed by '=== Winner ===' markers."""
    logfh.seek(0)
    winfh.write("=== Winner %s ===\n" % name)
    cat(logfh, winfh)
    winfh.write("=== End winner %s ===\n" % name)
+
def lose(name, logfh, losefh):
    """Record a failed build: copy its whole log into *losefh*,
    bracketed by '=== Loser ===' markers."""
    logfh.seek(0)
    losefh.write("=== Loser %s ===\n" % name)
    cat(logfh, losefh)
    losefh.write("=== End loser %s ===\n" % name)
+
def wait(procs, winfh, losefh, winners, losers):
    """Block in os.wait() until one spawned build finishes, then reap it.

    The finished tuple is removed from *procs*, its log is copied into
    *winfh* or *losefh* via win()/lose(), and its human-readable name is
    appended to *winners* or *losers* accordingly.
    """
    try:
        (pid, stat) = os.wait()
    except OSError as err:
        # BUG FIX: this branch used to execute a bare `next` -- a no-op
        # expression statement, not `continue` -- and then fall through to
        # `if pid:` with `pid` unbound, raising NameError.  Report and bail.
        sys.stderr.write("This shouldn't happen.\n")
        sys.stderr.write("%s\n" % (err,))
        return
    if pid:
        [tup] = [tup for tup in procs if tup[0].pid == pid]
        (proc, logfh, distro, distro_version, arch, mongo_version) = tup
        procs.remove(tup)
        name = "%s %s %s %s" % (distro, distro_version, arch, mongo_version)
        if os.WIFEXITED(stat):
            if os.WEXITSTATUS(stat) == 0:
                win(name, logfh, winfh)
                winners.append(name)
            else:
                lose(name, logfh, losefh)
                losers.append(name)
        if os.WIFSIGNALED(stat):
            lose(name, logfh, losefh)
            losers.append(name)
+
+
+
def __main__():
    """Drive makedist.py for every distro/version/arch/branch combination,
    then merge the per-version apt repositories and push them to S3.

    Usage: makealldists.py [makedist options ...] <branch[,branch,...]>
    Returns 1 if any spawned job went unaccounted for, 0 on success.
    """
    # FIXME: getopt & --help.
    sys.stdout.write(" ".join(sys.argv) + "\n")
    branches = sys.argv[-1]
    makedistopts = sys.argv[1:-1]

    # Output from makedist.py goes here.
    outputroot=tempfile.mkdtemp()
    mergedir=tempfile.mkdtemp()
    repodir=tempfile.mkdtemp()

    sys.stdout.write(("makedist output under: %s\nmerge directory: %s\ncombined repo: %s\n" % (outputroot, mergedir, repodir)) + "\n")
    # Add more dist/version/architecture tuples as they're supported.
    # NOTE(review): "10.4"/"9.4" look like they may be meant as Ubuntu
    # "10.04"/"9.04" -- confirm against the naming makedist.py expects.
    dists = (("ubuntu", "10.4"),
             ("ubuntu", "9.10"),
             ("ubuntu", "9.4"),
             ("ubuntu", "8.10"),
             ("debian", "5.0"))
    arches = ("x86", "x86_64")
    mongos = branches.split(',')
    # Run a makedist for each distro/version/architecture tuple above.
    winners = []
    losers = []
    winfh=tempfile.TemporaryFile()
    losefh=tempfile.TemporaryFile()
    procs = []
    count = 0
    for ((distro, distro_version), arch, spec) in gen([dists, arches, mongos]):
        count+=1
        (mongo_version,_,_) = parse_mongo_version_spec(spec)
        # blech: the "Packages.gz" metadata files in a Debian
        # repository will clobber each other unless we make a
        # different "repository" for each mongo version we're
        # building.
        if distro in ["debian", "ubuntu"]:
            outputdir = "%s/%s/%s" % (outputroot, mongo_version, distro)
        else:
            outputdir = outputroot
            # BUG FIX: this used to be `makedistopts += "--subdirs"`, which
            # extends the list with the *characters* of the string.  Append
            # the option as one argument.
            makedistopts += ["--subdirs"]

        procs.append(spawn(distro, distro_version, arch, spec, outputdir, makedistopts))

        # Keep at most 8 builds in flight; reap one before spawning more.
        if len(procs) == 8:
            wait(procs, winfh, losefh, winners, losers)

    while procs:
        wait(procs, winfh, losefh, winners, losers)

    winfh.seek(0)
    losefh.seek(0)
    nwinners=len(winners)
    nlosers=len(losers)
    sys.stdout.write("%d winners; %d losers\n" % (nwinners, nlosers))
    cat(winfh, sys.stdout)
    cat(losefh, sys.stdout)
    sys.stdout.write("%d winners; %d losers\n" % (nwinners, nlosers))
    if count == nwinners + nlosers:
        sys.stdout.write("All jobs accounted for\n")
#        return 0
    else:
        sys.stdout.write("Lost some jobs...?\n")
        return 1

    sys.stdout.flush()
    sys.stderr.flush()

    merge_directories_concatenating_conflicts(mergedir, glob.glob(outputroot+'/*'))

    argv=["mergerepositories.py", mergedir, repodir]
    sys.stdout.write("running %s\n" % (argv,))
    sys.stdout.write(" ".join(argv) + "\n")
    r = subprocess.Popen(argv).wait()
    if r != 0:
        raise Exception("mergerepositories.py exited %d" % r)
    sys.stdout.write(repodir + "\n")
    pushrepo(repodir)

    return 0


if __name__ == '__main__':
    __main__()
+
+
+# FIXME: this ought to be someplace else.
+
+# FIXME: remove this comment when the buildbot does this. After this
+# program, run something that amounts to
+#
+# find /tmp/distros -name *.deb -or -name Packages.gz | while read f; do echo "./s3cp.py $f ${f#/tmp/}"; done
+#
+# where ./s3cp.py is a trivial s3 put executable in this directory.
+
+# merge_directories_concatenating_conflicts('/tmp/distros/debian', '/tmp/distros-20100222/debian/HEAD', '/tmp/distros-20100222/debian/r1.3.2','/tmp/distros-20100222/debian/v1.2')
+
+# merge_directories_concatenating_conflicts('/tmp/distros/ubuntu', '/tmp/distros-20100222/ubuntu/HEAD', '/tmp/distros-20100222/ubuntu/r1.3.2', '/tmp/distros-20100222/ubuntu/v1.2')
diff --git a/buildscripts/mergerepositories.py b/buildscripts/mergerepositories.py
new file mode 100755
index 00000000000..7864d837ef8
--- /dev/null
+++ b/buildscripts/mergerepositories.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+
+from libcloud.types import Provider
+from libcloud.providers import get_driver
+from libcloud.drivers.ec2 import EC2NodeDriver, NodeImage
+from libcloud.base import Node, NodeImage, NodeSize, NodeState
+
+# libcloud's SSH client seems to be one of those pointless wrappers
+# that (at the moment) both doesn't add anything to the thing it wraps
+# (Paramiko) and also fails to expose the underlying thing's features.
+# What's wrong with people?
+#from libcloud.ssh import SSHClient
+
+import time
+import sys
+import settings
+import subprocess
+import os
+import socket
+
# libcloud driver class for EC2, plus an authenticated driver instance
# built from the AWS credentials in settings (settings.id / settings.key).
EC2 = get_driver(Provider.EC2)
EC2Driver=EC2NodeDriver(settings.id, settings.key)
+
def tryEC2():
    """Smoke-test EC2 provisioning: boot an Ubuntu node, wait for it to
    leave the PENDING state, and always destroy it afterwards."""
    image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
    size=NodeSize('m1.large', 'large', None, None, None, None, EC2)

    node = None
    try:
        node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, keyname="kp1", securitygroup=['default', 'dist-slave', 'buildbot-slave'])
        sys.stdout.write("%s\n" % (node,))
        sys.stdout.write("%s\n" % (node.id,))
        while node.state == NodeState.PENDING:
            time.sleep(3)
    finally:
        # Never leak a running (billed) instance, even on error.
        if node:
            node.destroy()
+
+
+# I don't think libcloud's Nodes implement __enter__ and __exit__, and
+# I like the with statement for ensuring that we don't leak nodes when
+# we don't have to.
class ubuntuNode(object):
    """Context-manager wrapper around a freshly created EC2 Ubuntu node.

    __init__ boots the node; initWait() blocks until EC2 reports it
    RUNNING and port 22 accepts connections; __exit__ destroys the node
    so instances are not leaked when the `with` block ends.
    """
    def __init__(self):
        # Same hard-coded AMI and size as tryEC2() above.
        image=NodeImage('ami-bf07ead6', 'ubuntu 10.4', EC2)
        size=NodeSize('m1.large', 'large', None, None, None, None, EC2)

        self.node = EC2Driver.create_node(image=image, name="ubuntu-test", size=size, securitygroup=['default', 'dist-slave', 'buildbot-slave'], keyname='kp1')

    def initWait(self):
        """Poll until the node is RUNNING, then until sshd answers on port 22."""
        print "waiting for node to spin up"
        # Wait for EC2 to tell us the node is running.
        while 1:
            ## XXX: it seems as if existing nodes' states don't get
            ## updated, so we poll EC2 until we get a RUNNING node
            ## with the desired id.

            #EC2Driver.list_nodes()
            #print self.node
            #if self.node.state == NodeState.PENDING:
            #    time.sleep(10)
            #else:
            #    break
            # Re-list all nodes and pick ours by id; the cached self.node's
            # state does not appear to refresh on its own (see XXX above).
            n=[n for n in EC2Driver.list_nodes() if (n.id==self.node.id)][0]
            if n.state == NodeState.PENDING:
                time.sleep(10)
            else:
                self.node = n
                break
        print "ok"
        # Now wait for the node's sshd to be accepting connections.
        print "waiting for ssh"
        # NOTE(review): dead debug scaffolding -- sshwait is set to True on
        # the previous line, so this early return can never trigger.
        sshwait = True
        if sshwait == False:
            return
        while sshwait:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                try:
                    # A successful TCP connect to port 22 ends the wait.
                    s.connect((self.node.public_ip[0], 22))
                    sshwait = False
                    print "connected on port 22 (ssh)"
                    time.sleep(15) # arbitrary timeout, in case the
                                   # remote sshd is slow.
                except socket.error, err:
                    # Not listening yet; close the socket and retry below.
                    pass
            finally:
                s.close()
            time.sleep(3) # arbitrary timeout
        print "ok"


    def __enter__(self):
        return self

    def __exit__(self, arg0, arg1, arg2):
        # Destroy the instance unconditionally, even if the body raised.
        print "shutting down node %s" % self.node
        self.node.destroy()
+
+
def tryRackSpace():
    """Smoke-test Rackspace provisioning: boot a Fedora 11 node on the
    smallest size, wait for it to leave PENDING, and always destroy it.

    Raises Exception if the image-name match is not unique.
    """
    driver=get_driver(Provider.RACKSPACE)
    # SECURITY(review): credentials are hard-coded in source; move them into
    # settings alongside the AWS credentials.
    conn = driver('tengen', '7d67202d37af58a7adb32cb1626452c4')
    string='Fedora 11'
    images=filter(lambda x: (x.name.find(string) > -1), conn.list_images())
    # BUG FIX: the original sorted with cmp=lambda x,y: int(x.ram)<int(y.ram),
    # which returns a bool (0/1, never -1) and so produced a wrong order.
    # Sort ascending by RAM with a key function; sizes[0] is the smallest.
    sizes=sorted(conn.list_sizes(), key=lambda s: int(s.ram))
    node = None
    if len(images) != 1:
        # BUG FIX: `raise "string"` is illegal (strings are not exceptions).
        raise Exception("too many images with \"%s\" in the name" % string)
    try:
        image = images[0]
        node = conn.create_node(image=image, name=string, size=sizes[0])
        sys.stdout.write("%s\n" % (node,))
        sys.stdout.write("%s\n" % (node.extras['password'],))
        while node.state == NodeState.PENDING:
            time.sleep(10)
    finally:
        # Never leak a running (billed) instance, even on error.
        if node:
            node.destroy()
+
class Err(Exception):
    """Raised when a spawned command exits with a nonzero status."""
    pass

def run_for_effect(argv):
    """Echo *argv* to stdout, run it, and raise Err unless it exits 0."""
    sys.stdout.write(" ".join(argv) + "\n")
    status = subprocess.Popen(argv).wait()
    if status != 0:
        raise Err("subprocess %s exited %d" % (argv, status))
+
if __name__ == "__main__":
    # Last two CLI args: the local merged-repo directory to upload, and the
    # local output directory the signed repo is copied back into.
    # NOTE(review): `dir` shadows the builtin of the same name.
    (dir, outdir) = sys.argv[-2:]
    # Basename of the repo dir; this is its name in the remote home dir.
    dirtail=dir.rstrip('\/').split('/')[-1]

    gpgdir=settings.makedist['gpg_homedir']
    keyfile=settings.makedist['ssh_keyfile']

    # Shell fragment run remotely: build Packages/Packages.gz indexes for
    # every package subdirectory under ./debian and ./ubuntu.
    makeaptrepo="""for x in debian ubuntu; do (cd $x; for d in `find . -name *.deb | sed 's|^./||; s|/[^/]*$||' | sort -u`; do dpkg-scanpackages $d > $d/Packages; gzip -9c $d/Packages > $d/Packages.gz; done) ; done"""
    # Template for the apt Release file; VVVVVV is substituted remotely with
    # the basename of each repository directory.
    makereleaseprologue="""Origin: 10gen
Label: 10gen
Suite: 10gen
Codename: VVVVVV
Version: VVVVVV
Architectures: i386 amd64
Components: 10gen
Description: 10gen packages"""
    # Shell fragment run remotely: write and GPG-sign a Release file in each
    # directory exactly three levels deep.
    makeaptrelease="""find . -maxdepth 3 -mindepth 3 | while read d; do ( cd $d && (echo '%s' | sed s/VVVVVV/$(basename $(pwd))/; apt-ftparchive release .) > /tmp/Release && mv /tmp/Release . && gpg -r `gpg --list-keys | grep uid | awk '{print $(NF)}'` --no-secmem-warning --no-tty -abs --output Release.gpg Release ); done""" % makereleaseprologue
    # Boot a throwaway EC2 Ubuntu box, copy the repo tree and GPG keyring
    # over, build and sign the apt metadata there, then copy the result back.
    with ubuntuNode() as ubuntu:
        ubuntu.initWait()
        print ubuntu.node
        run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sudo", "sh", "-c", "\"export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -y install debhelper\""])
        run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", dir, "ubuntu@"+ubuntu.node.public_ip[0]+":"])
        run_for_effect(["scp", "-o", "StrictHostKeyChecking no","-i", keyfile, "-r", gpgdir, "ubuntu@"+ubuntu.node.public_ip[0]+":.gnupg"])
        run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "sh", "-c", "\"ls -lR ./" + dirtail + "\""])
        run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrepo])
        run_for_effect(["ssh", "-o", "StrictHostKeyChecking no","-i", keyfile, "ubuntu@"+ubuntu.node.public_ip[0], "cd ./"+dirtail + " && " + makeaptrelease])
        run_for_effect(["scp", "-i", keyfile, "-r", "ubuntu@"+ubuntu.node.public_ip[0]+":./"+dirtail +'/*', outdir])
+
+ # TODO: yum repositories
+
+
+ #main()
+ #tryRackSpace()