author    Daniel Firth <dan.firth@codethink.co.uk>  2016-11-29 13:01:14 +0000
committer Daniel Firth <dan.firth@codethink.co.uk>  2016-11-29 13:01:14 +0000
commit    54aff1da1ea84c78e768681157f053c753c4df36 (patch)
tree      c323b4dc3296fd5b7c7f698bb0634f1964149c62
parent    a53b5fd06a392a7d4ce8b03a5416114ad3726faf (diff)
Revert "Move chdir, timer to utils; fix circular deps"
This reverts commit c3d414dd1fd062dafe0806851094a953530a7fdd.
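
For context: the commit being reverted had moved the chdir and timer helpers
out of ybd/app.py into ybd/utils.py to break an import cycle, and this revert
moves them back, as the diff below shows. A minimal two-file sketch of how
such a cycle arises (hypothetical module names, not the actual ybd layout):

    # file: app.py
    from utils import log      # importing utils starts executing utils.py

    def timer():
        log('starting')

    # file: utils.py
    from app import timer      # app.py is still only partially initialised
                               # at this point, so this import raises
                               # ImportError at startup

    def log(msg):
        print(msg)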
-rwxr-xr-x  kbas/__main__.py     13
-rwxr-xr-x  ybd/__main__.py      60
-rw-r--r--  ybd/app.py           49
-rw-r--r--  ybd/assembly.py      46
-rw-r--r--  ybd/cache.py         36
-rwxr-xr-x  ybd/concourse.py      8
-rw-r--r--  ybd/config.py         1
-rw-r--r--  ybd/defaults.py       3
-rw-r--r--  ybd/deployment.py    14
-rw-r--r--  ybd/morphs.py         3
-rw-r--r--  ybd/release_note.py   3
-rw-r--r--  ybd/repos.py         24
-rw-r--r--  ybd/sandbox.py       32
-rw-r--r--  ybd/splitting.py     48
-rw-r--r--  ybd/utils.py         73
15 files changed, 200 insertions, 213 deletions
diff --git a/kbas/__main__.py b/kbas/__main__.py
index 3eecd89..204c7b9 100755
--- a/kbas/__main__.py
+++ b/kbas/__main__.py
@@ -86,15 +86,14 @@ class KeyedBinaryArtifactServer(object):
@bottle.get('/1.0/artifacts')
def get_morph_artifact():
f = request.query.filename
- return static_file(f, root=config.config['artifact-dir'],
- download=True)
+ return static_file(f, root=config.config['artifact-dir'], download=True)
@bottle.get('/get/<cache_id>')
def get_artifact(cache_id):
f = os.path.join(cache_id, cache_id)
config.config['downloads'] += 1
- return static_file(f, root=config.config['artifact-dir'],
- download=True, mimetype='application/x-tar')
+ return static_file(f, root=config.config['artifact-dir'], download=True,
+ mimetype='application/x-tar')
@bottle.get('/')
@bottle.get('/status')
@@ -104,8 +103,7 @@ class KeyedBinaryArtifactServer(object):
artifacts = len(os.listdir(config.config['artifact-dir']))
started = config.config['start-time'].strftime('%y-%m-%d %H:%M:%S')
downloads = config.config['downloads']
- last_upload = config.config['last-upload'].strftime(
- '%y-%m-%d %H:%M:%S')
+ last_upload = config.config['last-upload'].strftime('%y-%m-%d %H:%M:%S')
content = [['Started:', started, None]]
content += [['Last upload:', last_upload, None]]
if config.config.get('last-reject'):
@@ -133,8 +131,7 @@ class KeyedBinaryArtifactServer(object):
response.status = 400 # bad request, cache_id contains bad things
return
- if os.path.isdir(os.path.join(config.config['artifact-dir'],
- cache_id)):
+ if os.path.isdir(os.path.join(config.config['artifact-dir'], cache_id)):
if cache.check(cache_id) == request.forms.get('checksum', 'XYZ'):
response.status = 777 # this is the same binary we have
return
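
The kbas hunks above only reflow calls to bottle's static_file helper onto
single lines. For reference, a self-contained sketch of the same serving
pattern (the root path and port are illustrative, not kbas configuration):

    import os
    from bottle import Bottle, static_file

    app = Bottle()

    @app.get('/get/<cache_id>')
    def get_artifact(cache_id):
        # kbas stores each artifact as <artifact-dir>/<cache_id>/<cache_id>
        f = os.path.join(cache_id, cache_id)
        return static_file(f, root='/tmp/artifacts', download=True,
                           mimetype='application/x-tar')

    # app.run(host='0.0.0.0', port=8000)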
diff --git a/ybd/__main__.py b/ybd/__main__.py
index 68553f8..7fb529e 100755
--- a/ybd/__main__.py
+++ b/ybd/__main__.py
@@ -21,25 +21,23 @@ import os
import sys
import fcntl
from ybd import app, cache, sandbox
-from ybd.app import cleanup, RetryException, setup, spawn
+from ybd.app import cleanup, RetryException, setup, spawn, timer
from ybd.assembly import compose
-from ybd import config
+from ybd.config import config
from ybd.deployment import deploy
from ybd.pots import Pots
from ybd.concourse import Pipeline
from ybd.release_note import do_release_note
-from ybd.utils import log, timer
import sandboxlib
import yaml
def write_cache_key():
- with open(config.config['result-file'], 'w') as f:
+ with open(config['result-file'], 'w') as f:
f.write(target['cache'] + '\n')
for kind in ['systems', 'strata', 'chunks']:
- log('COUNT', '%s has %s %s' % (config.config['target'],
- config.config[kind], kind))
- log('RESULT', 'Cache-key for target is at', config.config['result-file'])
+ log('COUNT', '%s has %s %s' % (config['target'], config[kind], kind))
+ log('RESULT', 'Cache-key for target is at', config['result-file'])
print('')
@@ -53,46 +51,46 @@ if not os.path.exists('./VERSION'):
os.chdir(os.path.join(os.getcwd(), '..', 'definitions'))
setup(sys.argv, original_cwd)
-cleanup(config.config['tmp'])
+cleanup(config['tmp'])
with timer('TOTAL'):
- tmp_lock = open(os.path.join(config.config['tmp'], 'lock'), 'r')
+ tmp_lock = open(os.path.join(config['tmp'], 'lock'), 'r')
fcntl.flock(tmp_lock, fcntl.LOCK_SH | fcntl.LOCK_NB)
- target = os.path.join(config.config['defdir'], config.config['target'])
- log('TARGET', 'Target is %s' % target, config.config['arch'])
- with timer('DEFINITIONS', 'parsing %s' % config.config['def-version']):
- config.defs = Pots()
+ target = os.path.join(config['defdir'], config['target'])
+ log('TARGET', 'Target is %s' % target, config['arch'])
+ with timer('DEFINITIONS', 'parsing %s' % config['def-version']):
+ app.defs = Pots()
- target = config.defs.get(config.config['target'])
- if config.config.get('mode', 'normal') == 'parse-only':
+ target = app.defs.get(config['target'])
+ if config.get('mode', 'normal') == 'parse-only':
Pipeline(target)
os._exit(0)
with timer('CACHE-KEYS', 'cache-key calculations'):
cache.cache_key(target)
- if 'release-note' in config.config:
- do_release_note(config.config['release-note'])
+ if 'release-note' in config:
+ do_release_note(config['release-note'])
- if config.config['total'] == 0 or (config.config['total'] == 1 and
- target.get('kind') == 'cluster'):
- log('ARCH', 'No definitions for', config.config['arch'], exit=True)
+ if config['total'] == 0 or (config['total'] == 1 and
+ target.get('kind') == 'cluster'):
+ log('ARCH', 'No definitions for', config['arch'], exit=True)
- config.defs.save_trees()
- if config.config.get('mode', 'normal') == 'keys-only':
+ app.defs.save_trees()
+ if config.get('mode', 'normal') == 'keys-only':
write_cache_key()
os._exit(0)
- cache.cull(config.config['artifacts'])
+ cache.cull(config['artifacts'])
sandbox.executor = sandboxlib.executor_for_platform()
- log(config.config['target'], 'Sandbox using %s' % sandbox.executor)
+ log(config['target'], 'Sandbox using %s' % sandbox.executor)
if sandboxlib.chroot == sandbox.executor:
- log(config.config['target'], 'WARNING: using chroot is less safe ' +
+ log(config['target'], 'WARNING: using chroot is less safe ' +
'than using linux-user-chroot')
- if 'instances' in config.config:
+ if 'instances' in config:
spawn()
while True:
@@ -110,12 +108,12 @@ with timer('TOTAL'):
log(target, 'Exiting: uncaught exception')
os._exit(1)
- if config.config.get('reproduce'):
- log('REPRODUCED', 'Matched %s of' %
- len(config.config['reproduced']), config.config['tasks'])
- for match in config.config['reproduced']:
+ if config.get('reproduce'):
+ log('REPRODUCED',
+ 'Matched %s of' % len(config['reproduced']), config['tasks'])
+ for match in config['reproduced']:
print(match[0], match[1])
- if target.get('kind') == 'cluster' and config.config.get('fork') is None:
+ if target.get('kind') == 'cluster' and config.get('fork') is None:
with timer(target, 'cluster deployment'):
deploy(target)
diff --git a/ybd/app.py b/ybd/app.py
index ea78e43..7a3e869 100644
--- a/ybd/app.py
+++ b/ybd/app.py
@@ -29,7 +29,15 @@ from ybd.repos import get_version
from ybd.cache import cache_key
from ybd.utils import log
from ybd.config import config
-from ybd import utils
+try:
+ from riemann_client.transport import TCPTransport
+ from riemann_client.client import QueuedClient
+ riemann_available = True
+except ImportError:
+ riemann_available = False
+
+
+defs = {}
class RetryException(Exception):
@@ -221,6 +229,45 @@ def remove_dir(tmpdir):
log('SETUP', 'WARNING: unable to remove', tmpdir)
+@contextlib.contextmanager
+def chdir(dirname=None):
+ currentdir = os.getcwd()
+ try:
+ if dirname is not None:
+ os.chdir(dirname)
+ yield
+ finally:
+ os.chdir(currentdir)
+
+
+@contextlib.contextmanager
+def timer(dn, message=''):
+ starttime = datetime.datetime.now()
+ log(dn, 'Starting ' + message)
+ if type(dn) is dict:
+ dn['start-time'] = starttime
+ try:
+ yield
+ except:
+ raise
+ text = '' if message == '' else ' for ' + message
+ time_elapsed = utils.elapsed(starttime)
+ log(dn, 'Elapsed time' + text, time_elapsed)
+ log_riemann(dn, 'Timer', text, time_elapsed)
+
+
+def log_riemann(dn, service, text, time_elapsed):
+ if riemann_available and 'riemann-server' in config:
+ time_split = time_elapsed.split(':')
+ time_sec = int(time_split[0]) * 3600 \
+ + int(time_split[1]) * 60 + int(time_split[2])
+ with QueuedClient(TCPTransport(config['riemann-server'],
+ config['riemann-port'],
+ timeout=30)) as client:
+ client.event(service=service, description=text, metric_f=time_sec)
+ client.flush()
+
+
def spawn():
for fork in range(1, config.get('instances')):
if os.fork() == 0:
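
The two context managers restored above are used throughout this patch, e.g.
with timer('TOTAL') in ybd/__main__.py and with app.chdir(gitdir) in
ybd/repos.py. A standalone sketch of the chdir pattern, independent of ybd,
showing why the finally clause matters:

    import contextlib
    import os

    @contextlib.contextmanager
    def chdir(dirname=None):
        currentdir = os.getcwd()
        try:
            if dirname is not None:
                os.chdir(dirname)
            yield
        finally:
            os.chdir(currentdir)   # restored even if the block raises

    with chdir('/tmp'):
        print(os.getcwd())         # /tmp (or its resolved path)
    print(os.getcwd())             # back in the original directory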
diff --git a/ybd/assembly.py b/ybd/assembly.py
index 5f0fb41..b8ba4aa 100644
--- a/ybd/assembly.py
+++ b/ybd/assembly.py
@@ -20,19 +20,19 @@ import contextlib
import fcntl
import errno
-from ybd import app, repos, sandbox, config
-from ybd.app import lockfile, RetryException
+from ybd import app, repos, sandbox
+from ybd.app import config, timer
+from ybd.app import log, log_riemann, lockfile, RetryException
from ybd.cache import cache, cache_key, get_cache, get_remote
import datetime
from ybd.splitting import write_metadata, install_split_artifacts
-from ybd.utils import log, log_riemann, elapsed, timer
-
+from ybd.utils import elapsed
def compose(dn):
'''Work through defs tree, building and assembling until target exists'''
if type(dn) is not dict:
- dn = config.defs.get(dn)
+ dn = app.defs.get(dn)
# if we can't calculate cache key, we can't create this component
if cache_key(dn) is False:
@@ -48,15 +48,14 @@ def compose(dn):
log(dn, "Composing", dn['name'], verbose=True)
# if we have a kbas, look there to see if this component exists
- if config.config.get('kbas-url') and not \
- config.config.get('reproduce'):
+ if config.get('kbas-url') and not config.get('reproduce'):
with claim(dn):
if get_remote(dn):
- config.config['counter'].increment()
+ config['counter'].increment()
return cache_key(dn)
# we only work with user-specified arch
- if 'arch' in dn and dn['arch'] != config.config['arch']:
+ if 'arch' in dn and dn['arch'] != config['arch']:
return None
# Create composite components (strata, systems, clusters)
@@ -64,7 +63,7 @@ def compose(dn):
shuffle(systems)
for system in systems:
for s in system.get('subsystems', []):
- subsystem = config.defs.get(s['path'])
+ subsystem = app.defs.get(s['path'])
compose(subsystem)
compose(system['path'])
@@ -85,7 +84,7 @@ def install_contents(dn, contents=None):
shuffle(contents)
for it in contents:
- item = config.defs.get(it)
+ item = app.defs.get(it)
if os.path.exists(os.path.join(dn['sandbox'],
'baserock', item['name'] + '.meta')):
# content has already been installed
@@ -100,7 +99,7 @@ def install_contents(dn, contents=None):
compose(item)
sandbox.install(dn, item)
- if config.config.get('log-verbose'):
+ if config.get('log-verbose'):
log(dn, 'Added contents\n', contents)
sandbox.list_files(dn)
@@ -114,7 +113,7 @@ def install_dependencies(dn, dependencies=None):
log(dn, 'Installing dependencies\n', dependencies, verbose=True)
shuffle(dependencies)
for it in dependencies:
- dependency = config.defs.get(it)
+ dependency = app.defs.get(it)
if os.path.exists(os.path.join(dn['sandbox'], 'baserock',
dependency['name'] + '.meta')):
# dependency has already been installed
@@ -129,7 +128,7 @@ def install_dependencies(dn, dependencies=None):
if dependency.get('contents'):
install_dependencies(dn, dependency['contents'])
sandbox.install(dn, dependency)
- if config.config.get('log-verbose'):
+ if config.get('log-verbose'):
sandbox.list_files(dn)
@@ -155,12 +154,12 @@ def build(dn):
def run_build(dn):
- ''' This is where we run ./config.configure, make, make install (for example).
+ ''' This is where we run ./configure, make, make install (for example).
By the time we get here, all dependencies for component have already
been assembled.
'''
- if config.config.get('mode', 'normal') == 'no-build':
+ if config.get('mode', 'normal') == 'no-build':
log(dn, 'SKIPPING BUILD: artifact will be empty')
return
@@ -175,7 +174,7 @@ def run_build(dn):
env_vars = sandbox.env_vars_for_build(dn)
log(dn, 'Logging build commands to %s' % dn['log'])
- for build_step in config.defs.defaults.build_steps:
+ for build_step in app.defs.defaults.build_steps:
if dn.get(build_step):
log(dn, 'Running', build_step)
for command in dn.get(build_step, []):
@@ -193,7 +192,7 @@ def run_build(dn):
def shuffle(contents):
- if config.config.get('instances', 1) > 1:
+ if config.get('instances', 1) > 1:
random.seed(datetime.datetime.now())
random.shuffle(contents)
@@ -242,13 +241,13 @@ def get_build_commands(dn):
dn['install-commands'] = gather_integration_commands(dn)
return
- exit = True if config.config.get('check-definitions') == 'exit' else False
+ exit = True if config.get('check-definitions') == 'exit' else False
if 'build-system' in dn:
bs = dn['build-system']
log(dn, 'Defined build system is', bs)
else:
files = os.listdir(dn['checkout'])
- bs = config.defs.defaults.detect_build_system(files)
+ bs = app.defs.defaults.detect_build_system(files)
if bs == 'manual' and 'install-commands' not in dn:
if dn.get('kind', 'chunk') == 'chunk':
print(dn)
@@ -256,10 +255,9 @@ def get_build_commands(dn):
exit=exit)
log(dn, 'WARNING: Assumed build system is', bs)
- for build_step in config.defs.defaults.build_steps:
+ for build_step in app.defs.defaults.build_steps:
if dn.get(build_step, None) is None:
- commands = config.defs.defaults.build_systems[bs].get(
- build_step, [])
+ commands = app.defs.defaults.build_systems[bs].get(build_step, [])
dn[build_step] = commands
@@ -275,7 +273,7 @@ def gather_integration_commands(dn):
for name, cmdseq in it.items():
commands["%s-%s" % (name, product)] = cmdseq
for subcomponent in component.get('contents', []):
- _gather_recursively(config.defs.get(subcomponent), commands)
+ _gather_recursively(app.defs.get(subcomponent), commands)
all_commands = {}
_gather_recursively(dn, all_commands)
diff --git a/ybd/cache.py b/ybd/cache.py
index c66510c..ae6b52a 100644
--- a/ybd/cache.py
+++ b/ybd/cache.py
@@ -22,7 +22,7 @@ import os
import shutil
from subprocess import call
-from ybd import utils, config
+from ybd import utils
from ybd.utils import log
from ybd.repos import get_repo_url, get_tree
import tempfile
@@ -35,7 +35,7 @@ def cache_key(dn):
log(dn, 'No definition found for', dn, exit=True)
if type(dn) is not dict:
- dn = config.defs.get(dn)
+ dn = app.defs.get(dn)
if dn.get('cache') == 'calculating':
log(dn, 'Recursion loop for', dn, exit=True)
@@ -94,8 +94,7 @@ def hash_factors(dn):
key = list(factor.keys())[0]
hash_factors[key] = cache_key(key)
- relevant_factors = ['tree', 'submodules'] + \
- config.defs.defaults.build_steps
+ relevant_factors = ['tree', 'submodules'] + app.defs.defaults.build_steps
if config.config.get('artifact-version', False) not in range(0, 6):
relevant_factors += ['devices']
@@ -118,21 +117,20 @@ def hash_factors(dn):
hash_system_recursively(system)
if config.config.get('artifact-version', False):
- hash_factors['artifact-version'] = config.config['artifact-version']
+ hash_factors['artifact-version'] = config.config.get('artifact-version')
if config.config.get('artifact-version', 0) in range(0, 2):
# this way, any change to any build-system invalidates all caches
hash_factors['default-build-systems'] = \
- config.defs.defaults.build_systems
+ app.defs.defaults.build_systems
else:
# this way is better - only affected components get a new key
hash_factors['default-build-systems'] = \
- config.defs.defaults.build_systems.get(dn.get('build-system',
- 'manual'))
+ app.defs.defaults.build_systems.get(dn.get('build-system',
+ 'manual'))
if (config.config.get('default-splits', []) != [] and
dn.get('kind') == 'system'):
- hash_factors['default-splits'] = \
- config.config['default-splits']
+ hash_factors['default-splits'] = config.config['default-splits']
if config.config.get('artifact-version', 0) not in range(0, 7):
if dn.get('max-jobs'):
@@ -167,9 +165,8 @@ def cache(dn):
if config.config.get('kbas-password', 'insecure') != 'insecure' and \
config.config.get('kbas-url') is not None:
- if dn.get('kind', 'chunk') in \
- config.config.get('kbas-upload', 'chunk'):
- with utils.timer(dn, 'upload'):
+ if dn.get('kind', 'chunk') in config.config.get('kbas-upload', 'chunk'):
+ with app.timer(dn, 'upload'):
upload(dn)
@@ -237,14 +234,13 @@ def upload(dn):
return
if response.status_code == 777:
log(dn, 'Reproduced %s at' % md5(cachefile), dn['cache'])
- config.config['reproduced'].append(
- [md5(cachefile), dn['cache']])
+ config.config['reproduced'].append([md5(cachefile), dn['cache']])
return
if response.status_code == 405:
# server has different md5 for this artifact
if dn['kind'] == 'stratum' and config.config['reproduce']:
log('BIT-FOR-BIT',
- 'WARNING: reproduction failed for', dn['cache'])
+ 'WARNING: reproduction failed for', dn['cache'])
log(dn, 'Artifact server already has', dn['cache'])
return
log(dn, 'Artifact server problem:', response.status_code)
@@ -289,8 +285,7 @@ def get_remote(dn):
dn['tried'] = True # let's not keep asking for this artifact
- if dn.get('kind', 'chunk') not in \
- config.config.get('kbas-upload', 'chunk'):
+ if dn.get('kind', 'chunk') not in config.config.get('kbas-upload', 'chunk'):
return False
try:
@@ -330,7 +325,8 @@ def cull(artifact_dir):
if free >= config.config.get('min-gigabytes', 10):
log('SETUP', '%sGB is enough free space' % free)
if deleted > 0:
- log('SETUP', 'Culled %s items in' % deleted, artifact_dir)
+ log('SETUP', 'Culled %s items in' % deleted,
+ artifact_dir)
return True
path = os.path.join(artifact_dir, artifact)
if os.path.exists(os.path.join(path, artifact + '.unpacked')):
@@ -354,7 +350,7 @@ def cull(artifact_dir):
free = stat.f_frsize * stat.f_bavail / 1000000000
if free < config.config.get('min-gigabytes', 10):
log('SETUP', '%sGB is less than min-gigabytes:' % free,
- config.config.get('min-gigabytes', 10), exit=True)
+ config.config.get('min-gigabytes', 10), exit=True)
def check(artifact):
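
The cull hunks above decide whether to stop deleting artifacts by computing
free disk space from statvfs fields. A self-contained sketch of that
calculation (the 10 mirrors the 'min-gigabytes' default in the code above):

    import os

    stat = os.statvfs('/')
    # fragment size * blocks available to unprivileged users, in GB
    free = stat.f_frsize * stat.f_bavail / 1000000000
    if free < 10:
        print('%sGB is less than min-gigabytes: 10' % free)
    else:
        print('%sGB is enough free space' % free)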
diff --git a/ybd/concourse.py b/ybd/concourse.py
index 5c6b577..8a230d2 100755
--- a/ybd/concourse.py
+++ b/ybd/concourse.py
@@ -16,7 +16,7 @@
import yaml
import ybd.app
-from ybd.utils import log, timer
+from ybd.app import log, timer, defs
# Concourse data model:
# a 'resource' is an input line into a box
@@ -35,18 +35,18 @@ class Pipeline(object):
'image': 'docker:///devcurmudgeon/foo'}
self.write_pipeline(dn)
- output = config.defs.get(dn)['name'] + '.yml'
+ output = app.defs.get(dn)['name'] + '.yml'
with open(output, 'w') as f:
pipeline = {'resources': self.resources, 'jobs': self.jobs}
f.write(yaml.dump(pipeline, default_flow_style=False))
log('CONCOURSE', 'pipeline is at', output)
def write_pipeline(self, dn):
- dn = config.defs.get(dn)
+ dn = app.defs.get(dn)
self.add_resource(dn)
aggregate = []
for it in dn.get('build-depends', []) + dn.get('contents', []):
- component = config.defs.get(it)
+ component = app.defs.get(it)
self.add_resource(component)
if component.get('kind', 'chunk') == 'chunk':
aggregate += [{'get': component['name']}]
diff --git a/ybd/config.py b/ybd/config.py
index fc8f3a7..fcd8c8e 100644
--- a/ybd/config.py
+++ b/ybd/config.py
@@ -1,2 +1 @@
config = {}
-defs = {}
diff --git a/ybd/defaults.py b/ybd/defaults.py
index d3b5435..8d26f14 100644
--- a/ybd/defaults.py
+++ b/ybd/defaults.py
@@ -25,7 +25,8 @@ These definitions shall be used if no DEFAULTS file is present.
'''
import os
-from ybd import app, config
+from ybd import app
+from ybd.config import config
from ybd.utils import log
import yaml
diff --git a/ybd/deployment.py b/ybd/deployment.py
index 72103fa..e8bee47 100644
--- a/ybd/deployment.py
+++ b/ybd/deployment.py
@@ -17,7 +17,7 @@
import os
from subprocess import call
import json
-from ybd import app, cache, config, sandbox, utils
+from ybd import app, cache, sandbox
from ybd.utils import log
@@ -25,8 +25,8 @@ def deploy(target):
'''Deploy a cluster definition.'''
arch = config.config['arch']
for system in target.get('systems', []):
- if config.defs.get(system).get('arch', arch) == arch:
- with utils.timer(system, 'deployment'):
+ if app.defs.get(system).get('arch', arch) == arch:
+ with app.timer(system, 'deployment'):
deploy_system(system)
@@ -40,10 +40,10 @@ def deploy_system(system_spec, parent_location=''):
the result being used as the location for the deployment extensions.
'''
- system = config.defs.get(system_spec['path'])
+ system = app.defs.get(system_spec['path'])
if not cache.get_cache(system):
log('DEPLOY', 'System is not built, cannot deploy:\n', system,
- exit=True)
+ exit=True)
deploy_defaults = system_spec.get('deploy-defaults')
with sandbox.setup(system):
@@ -84,6 +84,6 @@ def do_deployment_manifest(system, configuration):
log(system, "Creating deployment manifest in", system['sandbox'])
data = {'configuration': configuration}
metafile = os.path.join(system['sandbox'], 'baserock', 'deployment.meta')
- with utils.chdir(system['sandbox']), open(metafile, "w") as f:
- json.dump(data, f, indent=4, sort_keys=True)
+ with app.chdir(system['sandbox']), open(metafile, "w") as f:
+ json.dump(data, f, indent=4, sort_keys=True, encoding='unicode-escape')
f.flush()
diff --git a/ybd/morphs.py b/ybd/morphs.py
index c108377..edc29db 100644
--- a/ybd/morphs.py
+++ b/ybd/morphs.py
@@ -17,9 +17,10 @@
import yaml
import glob
import os
+from ybd.app import chdir
from ybd.config import config
from ybd.defaults import Defaults
-from ybd.utils import chdir, log
+from ybd.utils import log
class Morphs(object):
diff --git a/ybd/release_note.py b/ybd/release_note.py
index 53a4a90..fe6381f 100644
--- a/ybd/release_note.py
+++ b/ybd/release_note.py
@@ -18,11 +18,12 @@ import os
from subprocess import check_output
import tempfile
from ybd import app
+from ybd.app import chdir
from ybd.config import config
from ybd.morphs import Morphs
from ybd.repos import explore, get_last_tag, get_repo_name
from ybd.repos import mirror, mirror_has_ref
-from ybd.utils import log, chdir
+from ybd.utils import log
def do_release_note(release_note):
diff --git a/ybd/repos.py b/ybd/repos.py
index 8ab6e52..ff2303e 100644
--- a/ybd/repos.py
+++ b/ybd/repos.py
@@ -22,7 +22,7 @@ import string
from subprocess import call, check_output
import sys
import requests
-from ybd import utils, config
+from ybd import utils
from ybd.utils import log
import tempfile
@@ -63,7 +63,7 @@ def get_repo_name(repo):
def get_version(gitdir, ref='HEAD'):
try:
- with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with app.chdir(gitdir), open(os.devnull, "w") as fnull:
version = check_output(['git', 'describe', '--tags', '--dirty'],
stderr=fnull)[0:-1]
tag = check_output(['git', 'describe', '--abbrev=0',
@@ -79,7 +79,7 @@ def get_version(gitdir, ref='HEAD'):
def get_last_tag(gitdir):
try:
- with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with app.chdir(gitdir), open(os.devnull, "w") as fnull:
tag = check_output(['git', 'describe', '--abbrev=0',
'--tags', 'HEAD'], stderr=fnull)[0:-1]
return tag
@@ -106,7 +106,7 @@ def get_tree(dn):
mirror(dn['name'], dn['repo'])
- with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with app.chdir(gitdir), open(os.devnull, "w") as fnull:
if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,
stderr=fnull):
# can't resolve ref. is it upstream?
@@ -132,7 +132,7 @@ def mirror(name, repo):
tar_file = get_repo_name(repo_url) + '.tar'
log(name, 'Try fetching tarball %s' % tar_file)
# try tarball first
- with utils.chdir(tmpdir), open(os.devnull, "w") as fnull:
+ with app.chdir(tmpdir), open(os.devnull, "w") as fnull:
call(['wget', os.path.join(config.config['tar-url'], tar_file)],
stdout=fnull, stderr=fnull)
call(['tar', 'xf', tar_file], stderr=fnull)
@@ -145,7 +145,7 @@ def mirror(name, repo):
if call(['git', 'clone', '--mirror', '-n', repo_url, tmpdir]):
log(name, 'Failed to clone', repo, exit=True)
- with utils.chdir(tmpdir):
+ with app.chdir(tmpdir):
if call(['git', 'rev-parse']):
log(name, 'Problem mirroring git repo at', tmpdir, exit=True)
@@ -158,18 +158,18 @@ def mirror(name, repo):
def fetch(repo):
- with utils.chdir(repo), open(os.devnull, "w") as fnull:
+ with app.chdir(repo), open(os.devnull, "w") as fnull:
call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
def mirror_has_ref(gitdir, ref):
- with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with app.chdir(gitdir), open(os.devnull, "w") as fnull:
out = call(['git', 'cat-file', '-t', ref], stdout=fnull, stderr=fnull)
return out == 0
def update_mirror(name, repo, gitdir):
- with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with app.chdir(gitdir), open(os.devnull, "w") as fnull:
log(name, 'Refreshing mirror for %s' % repo)
repo_url = get_repo_url(repo)
if call(['git', 'fetch', repo_url, '+refs/*:refs/*', '--prune'],
@@ -180,7 +180,7 @@ def update_mirror(name, repo, gitdir):
def checkout(dn):
_checkout(dn['name'], dn['repo'], dn['ref'], dn['checkout'])
- with utils.chdir(dn['checkout']):
+ with app.chdir(dn['checkout']):
if os.path.exists('.gitmodules') or dn.get('submodules'):
checkout_submodules(dn)
@@ -204,7 +204,7 @@ def _checkout(name, repo, ref, checkout):
stdout=fnull, stderr=fnull):
log(name, 'Git clone failed for', gitdir, exit=True)
- with utils.chdir(checkout):
+ with app.chdir(checkout):
if call(['git', 'checkout', '--force', ref], stdout=fnull,
stderr=fnull):
log(name, 'Git checkout failed for', ref, exit=True)
@@ -214,7 +214,7 @@ def _checkout(name, repo, ref, checkout):
def source_date_epoch(checkout):
- with utils.chdir(checkout):
+ with app.chdir(checkout):
return check_output(['git', 'log', '-1', '--pretty=%ct'])[:-1]
diff --git a/ybd/sandbox.py b/ybd/sandbox.py
index c900810..4e40d44 100644
--- a/ybd/sandbox.py
+++ b/ybd/sandbox.py
@@ -24,7 +24,7 @@ import stat
import tempfile
from subprocess import call, PIPE
-from ybd import app, cache, utils, config
+from ybd import app, cache, utils
from ybd.repos import get_repo_url
from ybd.utils import log
@@ -116,7 +116,7 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
writable_paths = [dn['checkout'], dn['install'], tmpdir, ]
- cfg = dict(
+ config = dict(
cwd=dn['checkout'],
filesystem_root='/',
filesystem_writable_paths=writable_paths,
@@ -137,7 +137,7 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
writable_paths = [dn['name'] + '.build', dn['name'] + '.inst',
'/dev', '/proc', '/tmp', ]
- cfg = dict(
+ config = dict(
cwd=dn['name'] + '.build',
filesystem_root=dn['sandbox'],
filesystem_writable_paths=writable_paths,
@@ -160,25 +160,25 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
# Adjust config for what the backend is capable of. The user will be warned
# about any changes made.
- cfg = executor.degrade_config_for_capabilities(cfg, warn=False)
+ config = executor.degrade_config_for_capabilities(config, warn=False)
try:
if not allow_parallel:
env.pop("MAKEFLAGS", None)
- utils.log_env(dn['log'], env, argv_to_string(argv))
+ app.log_env(dn['log'], env, argv_to_string(argv))
with open(dn['log'], "a") as logfile:
exit_code = 99
try:
exit_code = executor.run_sandbox_with_redirection(
argv, stdout=logfile, stderr=sandboxlib.STDOUT,
- env=env, **cfg)
+ env=env, **config)
except:
import traceback
traceback.print_exc()
log('SANDBOX', 'ERROR: in run_sandbox_with_redirection',
- exit_code)
+ exit_code)
if exit_code != 0:
log(dn, 'ERROR: command failed in directory %s:\n\n' %
@@ -192,7 +192,7 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
def run_logged(dn, cmd_list):
- utils.log_env(dn['log'], os.environ, argv_to_string(cmd_list))
+ app.log_env(dn['log'], os.environ, argv_to_string(cmd_list))
with open(dn['log'], "a") as logfile:
if call(cmd_list, stdin=PIPE, stdout=logfile, stderr=logfile):
log(dn, 'ERROR: command failed in directory %s:\n\n' %
@@ -229,11 +229,10 @@ def run_extension(dn, deployment, step, method):
command.append(deployment.get('location') or
deployment.get('upgrade-location'))
- with utils.chdir(config.config['defdir']):
+ with app.chdir(config.config['defdir']):
try:
with open(cmd_bin, "r") as infh:
- with open(cmd_tmp.name, "w") as outfh:
- shutil.copyfileobj(infh, outfh)
+ shutil.copyfileobj(infh, cmd_tmp)
cmd_tmp.close()
os.chmod(cmd_tmp.name, 0o700)
@@ -279,7 +278,7 @@ def env_vars_for_build(dn):
prefixes = []
for name in dn.get('build-depends', []):
- dependency = config.defs.get(name)
+ dependency = app.defs.get(name)
prefixes.append(dependency.get('prefix', '/usr'))
prefixes = set(prefixes)
for prefix in prefixes:
@@ -298,8 +297,7 @@ def env_vars_for_build(dn):
env['PATH'] = ':'.join(path)
env['PREFIX'] = dn.get('prefix') or '/usr'
- env['MAKEFLAGS'] = '-j%s' % (dn.get('max-jobs') or
- config.config['max-jobs'])
+ env['MAKEFLAGS'] = '-j%s' % (dn.get('max-jobs') or config.config['max-jobs'])
env['TERM'] = 'dumb'
env['SHELL'] = '/bin/sh'
env['USER'] = env['USERNAME'] = env['LOGNAME'] = 'tomjon'
@@ -345,11 +343,11 @@ def create_devices(dn):
def list_files(component):
log(component, 'Sandbox %s contains\n' % component['sandbox'],
- os.listdir(component['sandbox']))
+ os.listdir(component['sandbox']))
try:
files = os.listdir(os.path.join(component['sandbox'], 'baserock'))
log(component,
- 'Baserock directory contains %s items\n' % len(files),
- sorted(files))
+ 'Baserock directory contains %s items\n' % len(files),
+ sorted(files))
except:
log(component, 'No baserock directory in', component['sandbox'])
diff --git a/ybd/splitting.py b/ybd/splitting.py
index f435b9a..1fa7558 100644
--- a/ybd/splitting.py
+++ b/ybd/splitting.py
@@ -14,12 +14,13 @@
#
# =*= License: GPL-2 =*=
-from ybd import app, config
+from ybd import app
+from ybd.app import config, chdir
from ybd.cache import get_cache
import os
import re
import yaml
-from ybd.utils import chdir, copy_file_list, log
+from ybd.utils import copy_file_list, log
def install_split_artifacts(dn):
@@ -30,9 +31,10 @@ def install_split_artifacts(dn):
sandbox to the dn['install']
'''
+
for content in dn['contents']:
key = list(content.keys())[0]
- stratum = config.defs.get(key)
+ stratum = app.defs.get(key)
move_required_files(dn, stratum, content[key])
@@ -66,7 +68,7 @@ def move_required_files(dn, stratum, artifacts):
yaml.safe_dump(split_stratum_metadata, f, default_flow_style=False)
for path in stratum['contents']:
- chunk = config.defs.get(path)
+ chunk = app.defs.get(path)
if chunk.get('build-mode', 'staging') == 'bootstrap':
continue
@@ -78,7 +80,7 @@ def move_required_files(dn, stratum, artifacts):
split_metadata = {'ref': metadata.get('ref'),
'repo': metadata.get('repo'),
'products': []}
- if config.config.get('artifact-version', 0) not in range(0, 1):
+ if config.get('artifact-version', 0) not in range(0, 1):
metadata['cache'] = dn.get('cache')
for product in metadata['products']:
@@ -106,13 +108,13 @@ def move_required_files(dn, stratum, artifacts):
def check_overlaps(dn):
- if set(config.config['new-overlaps']) <= set(config.config['overlaps']):
- config.config['new-overlaps'] = []
+ if set(config['new-overlaps']) <= set(config['overlaps']):
+ config['new-overlaps'] = []
return
overlaps_found = False
- config.config['new-overlaps'] = list(set(config.config['new-overlaps']))
- for path in config.config['new-overlaps']:
+ config['new-overlaps'] = list(set(config['new-overlaps']))
+ for path in config['new-overlaps']:
log(dn, 'WARNING: overlapping path', path)
for filename in os.listdir(dn['baserockdir']):
with open(os.path.join(dn['baserockdir'], filename)) as f:
@@ -121,11 +123,10 @@ def check_overlaps(dn):
log(filename, 'WARNING: overlap at', path[1:])
overlaps_found = True
break
- if config.config.get('check-overlaps') == 'exit':
- log(dn, 'Overlaps found', config.config['new-overlaps'], exit=True)
- config.config['overlaps'] = list(set(config.config['new-overlaps'] +
- config.config['overlaps']))
- config.config['new-overlaps'] = []
+ if config.get('check-overlaps') == 'exit':
+ log(dn, 'Overlaps found', config['new-overlaps'], exit=True)
+ config['overlaps'] = list(set(config['new-overlaps'] + config['overlaps']))
+ config['new-overlaps'] = []
def get_metadata(dn):
@@ -156,8 +157,7 @@ def compile_rules(dn):
regexps = []
splits = {}
split_rules = dn.get('products', [])
- default_rules = config.defs.defaults.get_split_rules(
- dn.get('kind', 'chunk'))
+ default_rules = app.defs.defaults.get_split_rules(dn.get('kind', 'chunk'))
for rules in split_rules, default_rules:
for rule in rules:
regexp = re.compile('^(?:' + '|'.join(rule.get('include')) + ')$')
@@ -175,7 +175,7 @@ def write_metadata(dn):
write_chunk_metafile(dn)
elif dn.get('kind', 'chunk') == 'stratum':
write_stratum_metafiles(dn)
- if config.config.get('check-overlaps', 'ignore') != 'ignore':
+ if config.get('check-overlaps', 'ignore') != 'ignore':
check_overlaps(dn)
@@ -214,7 +214,7 @@ def write_stratum_metafiles(stratum):
rules, splits = compile_rules(stratum)
for item in stratum['contents']:
- chunk = config.defs.get(item)
+ chunk = app.defs.get(item)
if chunk.get('build-mode', 'staging') == 'bootstrap':
continue
@@ -223,10 +223,10 @@ def write_stratum_metafiles(stratum):
'repo': metadata.get('repo'),
'products': []}
- if config.config.get('artifact-version', 0) not in range(0, 1):
+ if config.get('artifact-version', 0) not in range(0, 1):
split_metadata['cache'] = metadata.get('cache')
- chunk_artifacts = config.defs.get(chunk).get('artifacts', {})
+ chunk_artifacts = app.defs.get(chunk).get('artifacts', {})
for artifact, target in chunk_artifacts.items():
splits[target].append(artifact)
@@ -254,11 +254,11 @@ def write_metafile(rules, splits, dn):
metadata['repo'] = dn.get('repo')
metadata['ref'] = dn.get('ref')
else:
- if config.config.get('artifact-version', 0) not in range(0, 2):
- metadata['repo'] = config.config['defdir']
- metadata['ref'] = config.config['def-version']
+ if config.get('artifact-version', 0) not in range(0, 2):
+ metadata['repo'] = config['defdir']
+ metadata['ref'] = config['def-version']
- if config.config.get('artifact-version', 0) not in range(0, 1):
+ if config.get('artifact-version', 0) not in range(0, 1):
metadata['cache'] = dn.get('cache')
meta = os.path.join(dn['baserockdir'], dn['name'] + '.meta')
diff --git a/ybd/utils.py b/ybd/utils.py
index 34791e6..ee57aaf 100644
--- a/ybd/utils.py
+++ b/ybd/utils.py
@@ -25,14 +25,7 @@ import sys
from fs.osfs import OSFS
from fs.multifs import MultiFS
import calendar
-from ybd import config
-
-try:
- from riemann_client.transport import TCPTransport
- from riemann_client.client import QueuedClient
- riemann_available = True
-except ImportError:
- riemann_available = False
+from ybd.config import config
# The magic number for timestamps: 2011-11-11 11:11:11
default_magic_timestamp = calendar.timegm([2011, 11, 11, 11, 11, 11])
@@ -372,7 +365,7 @@ def make_deterministic_tar_archive(base_name, root):
'''
- with chdir(root), open(base_name + '.tar', 'wb') as f:
+ with app.chdir(root), open(base_name + '.tar', 'wb') as f:
with tarfile.TarFile(mode='w', fileobj=f) as f_tar:
directories = [d[0] for d in os.walk('.')]
for d in sorted(directories):
@@ -387,10 +380,8 @@ def _find_extensions(paths):
the return dict.'''
extension_kinds = ['check', 'configure', 'write']
- tfs = OSFS(paths[0])
efs = MultiFS()
- for x in paths:
- efs.addfs(x, OSFS(x))
+ map(lambda x: efs.addfs(x, OSFS(x)), paths)
def get_extensions(kind):
return {os.path.splitext(x)[0]: efs.getsyspath(x)
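
Note that the hunk above restores a map() call used purely for its side
effect on efs. That works on Python 2, where map is eager, but on Python 3
map returns a lazy iterator and the lambda never runs. A minimal
illustration of the difference:

    added = []
    map(added.append, ['a', 'b'])        # Python 3: lazy, nothing happens
    print(added)                         # [] on Python 3; ['a', 'b'] on Python 2

    list(map(added.append, ['a', 'b']))  # forcing iteration runs the side effect
    print(added)                         # ['a', 'b']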
@@ -437,24 +428,23 @@ def log(dn, message='', data='', verbose=False, exit=False):
print('\n\n')
message = 'ERROR: ' + message.replace('WARNING: ', '')
- if verbose is True and config.config.get('log-verbose', False) is False:
+ if verbose is True and config.get('log-verbose', False) is False:
return
name = dn['name'] if type(dn) is dict else dn
timestamp = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S ')
- if config.config.get('log-timings') == 'elapsed':
- timestamp = timestamp[:9] + elapsed(config.config['start-time']) + ' '
- if config.config.get('log-timings', 'omit') == 'omit':
+ if config.get('log-timings') == 'elapsed':
+ timestamp = timestamp[:9] + elapsed(config['start-time']) + ' '
+ if config.get('log-timings', 'omit') == 'omit':
timestamp = ''
progress = ''
- if config.config.get('counter'):
- count = config.config['counter'].get()
- progress = '[%s/%s/%s] ' % \
- (count, config.config['tasks'], config.config['total'])
+ if config.get('counter'):
+ count = config['counter'].get()
+ progress = '[%s/%s/%s] ' % (count, config['tasks'], config['total'])
entry = '%s%s[%s] %s %s\n' % (timestamp, progress, name, message, data)
- if config.config.get('instances'):
- entry = str(config.config.get('fork', 0)) + ' ' + entry
+ if config.get('instances'):
+ entry = str(config.get('fork', 0)) + ' ' + entry
print(entry),
sys.stdout.flush()
@@ -471,42 +461,3 @@ def log_env(log, env, message=''):
logfile.write('%s=%s\n' % (key, msg))
logfile.write(message + '\n\n')
logfile.flush()
-
-
-@contextlib.contextmanager
-def chdir(dirname=None):
- currentdir = os.getcwd()
- try:
- if dirname is not None:
- os.chdir(dirname)
- yield
- finally:
- os.chdir(currentdir)
-
-
-@contextlib.contextmanager
-def timer(dn, message=''):
- starttime = datetime.datetime.now()
- log(dn, 'Starting ' + message)
- if type(dn) is dict:
- dn['start-time'] = starttime
- try:
- yield
- except:
- raise
- text = '' if message == '' else ' for ' + message
- time_elapsed = elapsed(starttime)
- log(dn, 'Elapsed time' + text, time_elapsed)
- log_riemann(dn, 'Timer', text, time_elapsed)
-
-
-def log_riemann(dn, service, text, time_elapsed):
- if riemann_available and 'riemann-server' in config.config:
- time_split = time_elapsed.split(':')
- time_sec = int(time_split[0]) * 3600 \
- + int(time_split[1]) * 60 + int(time_split[2])
- with QueuedClient(TCPTransport(config.config['riemann-server'],
- config.config['riemann-port'],
- timeout=30)) as client:
- client.event(service=service, description=text, metric_f=time_sec)
- client.flush()