author    devcurmudgeon <paul.sherwood@codethink.co.uk>  2016-11-28 10:00:58 +0000
committer devcurmudgeon <paul.sherwood@codethink.co.uk>  2016-11-28 10:00:58 +0000
commit    eb632e633159fe265735481ecd75681d9782e88e (patch)
tree      7f648ca36e4e472a3134fa176acb766cf73ccf18
parent    1556ae96714e564346a3c250334bc8d87181113d (diff)
parent    9a7552b487fed9b54f402998034194251d8c526f (diff)
download  ybd-eb632e633159fe265735481ecd75681d9782e88e.tar.gz
Merge branch 'lc/py3.4' into 'master'
lc/py3.4

See merge request !263
-rw-r--r--   .gitlab-ci.yml             7
-rwxr-xr-x   install_dependencies.sh   28
-rwxr-xr-x   kbas.py                    2
-rwxr-xr-x   kbas/__main__.py          59
-rw-r--r--   setup.py                   2
-rwxr-xr-x   ybd.py                     2
-rw-r--r--   ybd/__init__.py           21
-rwxr-xr-x   ybd/__main__.py           81
-rw-r--r--   ybd/app.py               107
-rw-r--r--   ybd/assembly.py           58
-rw-r--r--   ybd/cache.py             165
-rwxr-xr-x   ybd/concourse.py          10
-rw-r--r--   ybd/config.py              2
-rw-r--r--   ybd/defaults.py            9
-rw-r--r--   ybd/deployment.py         29
-rw-r--r--   ybd/morphs.py              7
-rw-r--r--   ybd/pots.py               10
-rw-r--r--   ybd/release_note.py       10
-rw-r--r--   ybd/repos.py             124
-rw-r--r--   ybd/sandbox.py           123
-rw-r--r--   ybd/splitting.py          52
-rw-r--r--   ybd/utils.py             128
22 files changed, 533 insertions, 503 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 10dec18..57d214c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,4 +1,4 @@
-image: devcurmudgeon/foo
+image: python:3.4-onbuild
before_script:
- sh ./install_dependencies.sh
@@ -24,11 +24,10 @@ cache_keys_v1:
YBD_artifact_version: "1"
YBD_mode: "keys-only"
script:
- - cd ..
- git clone git://git.baserock.org/baserock/baserock/definitions
- cd definitions
- git checkout baserock-14.40
- - ../ybd/ybd.py ci x86_64
+ - ../ybd.py ci x86_64
- echo ci.b9de86669ce182e60e3f9445e6394b478b67a2c73b4c0764491c158c5f2569e9 > expected.result
- diff expected.result ybd.result
@@ -61,4 +60,4 @@ check_build_no_kbas:
YBD_kbas_url: "false-url"
script:
- git clone git://git.baserock.org/baserock/baserock/definitions
- - ./ybd.py definitions/strata/build-essential.morph x86_64
+ - ./ybd.py definitions/systems/minimal-system-x86_64-generic.morph x86_64
diff --git a/install_dependencies.sh b/install_dependencies.sh
index 1a4c6a2..a69a8f2 100755
--- a/install_dependencies.sh
+++ b/install_dependencies.sh
@@ -32,7 +32,7 @@ installed=false
which apt-get 2>&1 > /dev/null
if [ $? -eq 0 ]; then
$SUDO apt-get -qq update
- $SUDO apt-get -qq install build-essential gawk git m4 wget
+ $SUDO apt-get -qq install build-essential gawk git m4 wget python3
if [ $? -ne 0 ]; then
echo "Install failed"
exit 1
@@ -43,7 +43,7 @@ fi
# install for fedora
which dnf 2>&1 > /dev/null
if [ $? -eq 0 ] && [ $installed = false ]; then
- $SUDO dnf install -y which make automake gcc gcc-c++ gawk git m4 wget python
+ $SUDO dnf install -y which make automake gcc gcc-c++ gawk git m4 wget python3
if [ $? -ne 0 ]; then
echo "Install failed"
exit 1
@@ -54,7 +54,7 @@ fi
# install for aws
which yum 2>&1 > /dev/null
if [ $? -eq 0 ] && [ $installed = false ]; then
- $SUDO yum install -y which make automake gcc gcc-c++ gawk git m4 wget python
+ $SUDO yum install -y which make automake gcc gcc-c++ gawk git m4 wget python3
if [ $? -ne 0 ]; then
echo "Install failed"
exit 1
@@ -65,7 +65,22 @@ fi
# install for Arch
which pacman 2>&1 > /dev/null
if [ $? -eq 0 ] && [ $installed = false ]; then
- $SUDO pacman -S --noconfirm which make automake gcc gawk git m4 wget python2
+ $SUDO pacman -S --noconfirm which make automake gcc gawk git m4 wget python3
+ if [ $? -ne 0 ]; then
+ echo "Install failed"
+ exit 1
+ fi
+ installed=true
+fi
+
+# install for Alpine
+which apk 2>&1 > /dev/null
+if [ $? -eq 0 ] && [ $installed = false ]; then
+ $SUDO apk add --update gcc gawk git m4 wget which make
+ $SUDO apk --no-cache add ca-certificates
+ $SUDO wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub
+ $SUDO wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r3/glibc-2.23-r3.apk
+ $SUDO apk add glibc-2.23-r3.apk
if [ $? -ne 0 ]; then
echo "Install failed"
exit 1
@@ -78,11 +93,10 @@ if [ $installed = false ]; then
exit 1
fi
-pip --version 2>&1 > /dev/null
+pip3 --version 2>&1 > /dev/null
if [ $? -ne 0 ]; then
wget https://bootstrap.pypa.io/get-pip.py
- chmod +x get-pip.py
- $SUDO ./get-pip.py
+ $SUDO python3 get-pip.py
$SUDO rm get-pip.py
fi
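
The script now installs python3 from every supported package manager and bootstraps pip3 via get-pip.py when it is missing. A minimal sketch of the equivalent runtime guard in Python itself; the 3.4 version floor is an assumption taken from the branch name, not from the script:

    import shutil
    import sys

    # Refuse to run on anything older than the interpreter this branch targets.
    if sys.version_info < (3, 4):
        sys.exit('ybd now requires Python 3.4 or newer')

    # Mirror the script's final check: pip3 must be on PATH.
    if shutil.which('pip3') is None:
        sys.exit('pip3 not found; run install_dependencies.sh first')
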
diff --git a/kbas.py b/kbas.py
index da8547c..544c93c 100755
--- a/kbas.py
+++ b/kbas.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (C) 2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
diff --git a/kbas/__main__.py b/kbas/__main__.py
index 4445f67..0f4239d 100755
--- a/kbas/__main__.py
+++ b/kbas/__main__.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (C) 2015-2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
@@ -40,9 +40,9 @@ class KeyedBinaryArtifactServer(object):
app.load_configs([
os.path.join(os.getcwd(), 'kbas.conf'),
os.path.join(os.path.dirname(__file__), 'config', 'kbas.conf')])
- app.config['start-time'] = datetime.now()
- app.config['last-upload'] = datetime.now()
- app.config['downloads'] = 0
+ config.config['start-time'] = datetime.now()
+ config.config['last-upload'] = datetime.now()
+ config.config['downloads'] = 0
try:
import cherrypy
@@ -51,12 +51,12 @@ class KeyedBinaryArtifactServer(object):
server = 'wsgiref'
# for development:
- if app.config.get('mode') == 'development':
- bottle.run(server=server, host=app.config['host'],
- port=app.config['port'], debug=True, reloader=True)
+ if config.config.get('mode') == 'development':
+ bottle.run(server=server, host=config.config['host'],
+ port=config.config['port'], debug=True, reloader=True)
else:
- bottle.run(server=server, host=app.config['host'],
- port=app.config['port'], reloader=True)
+ bottle.run(server=server, host=config.config['host'],
+ port=config.config['port'], reloader=True)
@bottle.get('/static/<filename>')
def send_static(filename):
@@ -67,7 +67,7 @@ class KeyedBinaryArtifactServer(object):
@bottle.get('/<name>')
@bottle.get('/artifacts/<name>')
def list(name=""):
- names = glob.glob(os.path.join(app.config['artifact-dir'],
+ names = glob.glob(os.path.join(config.config['artifact-dir'],
'*' + name + '*'))
try:
content = [[strftime('%y-%m-%d', gmtime(os.stat(x).st_atime)),
@@ -86,28 +86,30 @@ class KeyedBinaryArtifactServer(object):
@bottle.get('/1.0/artifacts')
def get_morph_artifact():
f = request.query.filename
- return static_file(f, root=app.config['artifact-dir'], download=True)
+ return static_file(f, root=config.config['artifact-dir'],
+ download=True)
@bottle.get('/get/<cache_id>')
def get_artifact(cache_id):
f = os.path.join(cache_id, cache_id)
- app.config['downloads'] += 1
- return static_file(f, root=app.config['artifact-dir'], download=True,
- mimetype='application/x-tar')
+ config.config['downloads'] += 1
+ return static_file(f, root=config.config['artifact-dir'],
+ download=True, mimetype='application/x-tar')
@bottle.get('/')
@bottle.get('/status')
def status():
- stat = os.statvfs(app.config['artifact-dir'])
+ stat = os.statvfs(config.config['artifact-dir'])
free = stat.f_frsize * stat.f_bavail / 1000000000
- artifacts = len(os.listdir(app.config['artifact-dir']))
- started = app.config['start-time'].strftime('%y-%m-%d %H:%M:%S')
- downloads = app.config['downloads']
- last_upload = app.config['last-upload'].strftime('%y-%m-%d %H:%M:%S')
+ artifacts = len(os.listdir(config.config['artifact-dir']))
+ started = config.config['start-time'].strftime('%y-%m-%d %H:%M:%S')
+ downloads = config.config['downloads']
+ last_upload = config.config['last-upload'].strftime(
+ '%y-%m-%d %H:%M:%S')
content = [['Started:', started, None]]
content += [['Last upload:', last_upload, None]]
- if app.config.get('last-reject'):
- content += [['Last reject:', app.config['last-reject'], None]]
+ if config.config.get('last-reject'):
+ content += [['Last reject:', config.config['last-reject'], None]]
content += [['Space:', str(free) + 'GB', None]]
content += [['Artifacts:', str(artifacts), None]]
content += [['Downloads:', downloads, None]]
@@ -118,10 +120,10 @@ class KeyedBinaryArtifactServer(object):
@bottle.post('/upload')
def post_artifact():
- if app.config['password'] is 'insecure' or \
- request.forms.get('password') != app.config['password']:
+ if config.config['password'] is 'insecure' or \
+ request.forms.get('password') != config.config['password']:
print 'Upload attempt: password fail'
- app.config['last-reject'] = \
+ config.config['last-reject'] = \
datetime.now().strftime('%y-%m-%d %H:%M:%S')
response.status = 401 # unauthorized
return
@@ -131,14 +133,15 @@ class KeyedBinaryArtifactServer(object):
response.status = 400 # bad request, cache_id contains bad things
return
- if os.path.isdir(os.path.join(app.config['artifact-dir'], cache_id)):
+ if os.path.isdir(os.path.join(config.config['artifact-dir'],
+ cache_id)):
if cache.check(cache_id) == request.forms.get('checksum', 'XYZ'):
response.status = 777 # this is the same binary we have
return
response.status = 405 # not allowed, this artifact exists
return
- tempfile.tempdir = app.config['artifact-dir']
+ tempfile.tempdir = config.config['artifact-dir']
tmpdir = tempfile.mkdtemp()
try:
upload = request.files.get('file')
@@ -158,10 +161,10 @@ class KeyedBinaryArtifactServer(object):
checksum = cache.md5(artifact)
with open(artifact + '.md5', "a") as f:
f.write(checksum)
- shutil.move(tmpdir, os.path.join(app.config['artifact-dir'],
+ shutil.move(tmpdir, os.path.join(config.config['artifact-dir'],
cache_id))
response.status = 201 # success!
- app.config['last-upload'] = datetime.now()
+ config.config['last-upload'] = datetime.now()
return
except:
# something went wrong, clean up
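diff continues below; the upload route keeps its protocol: 201 on success, 405 when the artifact already exists under a different checksum, and the non-standard 777 when the server already holds an identical binary. A hedged client-side sketch of that handshake, using the form fields the handler and ybd/cache.py's upload() read; the URL is illustrative:

    import hashlib
    import requests

    def upload_artifact(base_url, cache_id, path, password):
        '''Send one artifact to a kbas server, mirroring ybd/cache.py upload().'''
        with open(path, 'rb') as f:
            checksum = hashlib.md5(f.read()).hexdigest()
        params = {'filename': cache_id, 'password': password,
                  'checksum': checksum}
        with open(path, 'rb') as f:
            response = requests.post(url=base_url + 'upload', data=params,
                                     files={'file': f})
        if response.status_code == 201:
            return 'uploaded'
        if response.status_code == 777:
            return 'server already has this exact binary'
        if response.status_code == 405:
            return 'server has a different artifact for this key'
        return 'rejected: %s' % response.status_code
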
diff --git a/setup.py b/setup.py
index aa2d8a0..aabe2c7 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
from setuptools import setup
diff --git a/ybd.py b/ybd.py
index 933e315..f2f3f6f 100755
--- a/ybd.py
+++ b/ybd.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (C) 2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
diff --git a/ybd/__init__.py b/ybd/__init__.py
index 92593fa..14839a6 100644
--- a/ybd/__init__.py
+++ b/ybd/__init__.py
@@ -13,13 +13,14 @@
# with this program. If not, see <http://www.gnu.org/licenses/>.
-import app
-import assembly
-import cache
-import defaults
-import morphs
-import pots
-import deployment
-import repos
-import sandbox
-import utils
+import ybd.app
+import ybd.assembly
+import ybd.cache
+import ybd.config
+import ybd.defaults
+import ybd.morphs
+import ybd.pots
+import ybd.deployment
+import ybd.repos
+import ybd.sandbox
+import ybd.utils
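
Python 3 dropped implicit relative imports, so the bare `import app` style above only worked while ybd ran under Python 2; inside a package the name must now be absolute (as the new lines do) or explicitly relative. A sketch of the forms that still work, assuming the ybd package is importable:

    # Inside ybd/__init__.py under Python 3:
    import ybd.app            # absolute import, the style this merge adopts
    from ybd import app       # equivalent absolute form used in other files
    from . import app         # explicit relative import would also work
    # A bare "import app" now raises ModuleNotFoundError unless some
    # unrelated top-level module named "app" happens to be on sys.path.
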
diff --git a/ybd/__main__.py b/ybd/__main__.py
index 27a292f..e0bff27 100755
--- a/ybd/__main__.py
+++ b/ybd/__main__.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (C) 2014-2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
@@ -20,25 +20,26 @@
import os
import sys
import fcntl
-import app
-from app import cleanup, config, log, RetryException, setup, spawn, timer
-from assembly import compose
-from deployment import deploy
-from pots import Pots
-from concourse import Pipeline
-import cache
-from release_note import do_release_note
-import sandbox
+from ybd import app, cache, sandbox
+from ybd.app import cleanup, RetryException, setup, spawn
+from ybd.assembly import compose
+from ybd import config
+from ybd.deployment import deploy
+from ybd.pots import Pots
+from ybd.concourse import Pipeline
+from ybd.release_note import do_release_note
+from ybd.utils import log, timer
import sandboxlib
import yaml
def write_cache_key():
- with open(config['result-file'], 'w') as f:
+ with open(config.config['result-file'], 'w') as f:
f.write(target['cache'] + '\n')
for kind in ['systems', 'strata', 'chunks']:
- log('COUNT', '%s has %s %s' % (config['target'], config[kind], kind))
- log('RESULT', 'Cache-key for target is at', config['result-file'])
+ log('COUNT', '%s has %s %s' % (config.config['target'],
+ config.config[kind], kind))
+ log('RESULT', 'Cache-key for target is at', config.config['result-file'])
print('')
@@ -52,46 +53,46 @@ if not os.path.exists('./VERSION'):
os.chdir(os.path.join(os.getcwd(), '..', 'definitions'))
setup(sys.argv, original_cwd)
-cleanup(config['tmp'])
+cleanup(config.config['tmp'])
with timer('TOTAL'):
- tmp_lock = open(os.path.join(config['tmp'], 'lock'), 'r')
+ tmp_lock = open(os.path.join(config.config['tmp'], 'lock'), 'r')
fcntl.flock(tmp_lock, fcntl.LOCK_SH | fcntl.LOCK_NB)
- target = os.path.join(config['defdir'], config['target'])
- log('TARGET', 'Target is %s' % target, config['arch'])
- with timer('DEFINITIONS', 'parsing %s' % config['def-version']):
- app.defs = Pots()
+ target = os.path.join(config.config['defdir'], config.config['target'])
+ log('TARGET', 'Target is %s' % target, config.config['arch'])
+ with timer('DEFINITIONS', 'parsing %s' % config.config['def-version']):
+ config.defs = Pots()
- target = app.defs.get(config['target'])
- if config.get('mode', 'normal') == 'parse-only':
+ target = config.defs.get(config.config['target'])
+ if config.config.get('mode', 'normal') == 'parse-only':
Pipeline(target)
os._exit(0)
with timer('CACHE-KEYS', 'cache-key calculations'):
cache.cache_key(target)
- if 'release-note' in config:
- do_release_note(config['release-note'])
+ if 'release-note' in config.config:
+ do_release_note(config.config['release-note'])
- if config['total'] == 0 or (config['total'] == 1 and
- target.get('kind') == 'cluster'):
- log('ARCH', 'No definitions for', config['arch'], exit=True)
+ if config.config['total'] == 0 or (config.config['total'] == 1 and
+ target.get('kind') == 'cluster'):
+ log('ARCH', 'No definitions for', config.config['arch'], exit=True)
- app.defs.save_trees()
- if config.get('mode', 'normal') == 'keys-only':
+ config.defs.save_trees()
+ if config.config.get('mode', 'normal') == 'keys-only':
write_cache_key()
os._exit(0)
- cache.cull(config['artifacts'])
+ cache.cull(config.config['artifacts'])
sandbox.executor = sandboxlib.executor_for_platform()
- log(config['target'], 'Sandbox using %s' % sandbox.executor)
+ log(config.config['target'], 'Sandbox using %s' % sandbox.executor)
if sandboxlib.chroot == sandbox.executor:
- log(config['target'], 'WARNING: using chroot is less safe ' +
+ log(config.config['target'], 'WARNING: using chroot is less safe ' +
'than using linux-user-chroot')
- if 'instances' in config:
+ if 'instances' in config.config:
spawn()
while True:
@@ -103,18 +104,18 @@ with timer('TOTAL'):
os._exit(1)
except RetryException:
pass
- except:
+ except Exception as e:
import traceback
traceback.print_exc()
log(target, 'Exiting: uncaught exception')
- os._exit(1)
+ raise e
- if config.get('reproduce'):
- log('REPRODUCED',
- 'Matched %s of' % len(config['reproduced']), config['tasks'])
- for match in config['reproduced']:
- print match[0], match[1]
+ if config.config.get('reproduce'):
+ log('REPRODUCED', 'Matched %s of' %
+ len(config.config['reproduced']), config.config['tasks'])
+ for match in config.config['reproduced']:
+ print(match[0], match[1])
- if target.get('kind') == 'cluster' and config.get('fork') is None:
+ if target.get('kind') == 'cluster' and config.config.get('fork') is None:
with timer(target, 'cluster deployment'):
deploy(target)
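
The top-level handler stops swallowing unexpected errors: instead of calling os._exit(1) after printing the traceback, it re-raises, so the interpreter reports the real exception and exit status. A minimal runnable sketch of the new shape, with a stand-in for the build step:

    import traceback

    def build_step():
        raise RuntimeError('surprise failure')    # stand-in for a build error

    try:
        build_step()
    except Exception as e:     # Python 3 spelling; "except Exception, e" is gone
        traceback.print_exc()
        raise e                # propagate instead of os._exit(1), so the
                               # interpreter reports the true exit status
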
diff --git a/ybd/app.py b/ybd/app.py
index 51dc2d9..ea78e43 100644
--- a/ybd/app.py
+++ b/ybd/app.py
@@ -25,18 +25,11 @@ import yaml
from multiprocessing import cpu_count, Value, Lock
from subprocess import call
from fs.osfs import OSFS # not used here, but we import it to check install
-from repos import get_version
-from cache import cache_key
-try:
- from riemann_client.transport import TCPTransport
- from riemann_client.client import QueuedClient
- riemann_available = True
-except ImportError:
- riemann_available = False
-
-
-config = {}
-defs = {}
+from ybd.repos import get_version
+from ybd.cache import cache_key
+from ybd.utils import log
+from ybd.config import config
+from ybd import utils
class RetryException(Exception):
@@ -77,48 +70,6 @@ def lockfile(dn):
return os.path.join(config['tmp'], cache_key(dn) + '.lock')
-def log(dn, message='', data='', verbose=False, exit=False):
- ''' Print a timestamped log. '''
-
- if exit:
- print('\n\n')
- message = 'ERROR: ' + message.replace('WARNING: ', '')
-
- if verbose is True and config.get('log-verbose', False) is False:
- return
-
- name = dn['name'] if type(dn) is dict else dn
-
- timestamp = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S ')
- if config.get('log-timings') == 'elapsed':
- timestamp = timestamp[:9] + elapsed(config['start-time']) + ' '
- if config.get('log-timings', 'omit') == 'omit':
- timestamp = ''
- progress = ''
- if config.get('counter'):
- count = config['counter'].get()
- progress = '[%s/%s/%s] ' % (count, config['tasks'], config['total'])
- entry = '%s%s[%s] %s %s\n' % (timestamp, progress, name, message, data)
- if config.get('instances'):
- entry = str(config.get('fork', 0)) + ' ' + entry
-
- print(entry),
- sys.stdout.flush()
-
- if exit:
- print('\n\n')
- os._exit(1)
-
-
-def log_env(log, env, message=''):
- with open(log, "a") as logfile:
- for key in sorted(env):
- msg = env[key] if 'PASSWORD' not in key else '(hidden)'
- logfile.write('%s=%s\n' % (key, msg))
- logfile.write(message + '\n\n')
- logfile.flush()
-
-
def warning_handler(message, category, filename, lineno, file=None, line=None):
'''Output messages from warnings.warn() - default output is a bit ugly.'''
@@ -238,7 +189,7 @@ def load_configs(config_files):
for key, value in yaml.safe_load(text).items():
config[key.replace('_', '-')] = value
msg = value if 'PASSWORD' not in key.upper() else '(hidden)'
- print ' %s=%s' % (key.replace('_', '-'), msg)
+ print(' %s=%s' % (key.replace('_', '-'), msg))
print
@@ -270,52 +221,6 @@ def remove_dir(tmpdir):
log('SETUP', 'WARNING: unable to remove', tmpdir)
-@contextlib.contextmanager
-def chdir(dirname=None):
- currentdir = os.getcwd()
- try:
- if dirname is not None:
- os.chdir(dirname)
- yield
- finally:
- os.chdir(currentdir)
-
-
-@contextlib.contextmanager
-def timer(dn, message=''):
- starttime = datetime.datetime.now()
- log(dn, 'Starting ' + message)
- if type(dn) is dict:
- dn['start-time'] = starttime
- try:
- yield
- except:
- raise
- text = '' if message == '' else ' for ' + message
- time_elapsed = elapsed(starttime)
- log(dn, 'Elapsed time' + text, time_elapsed)
- log_riemann(dn, 'Timer', text, time_elapsed)
-
-
-def log_riemann(dn, service, text, time_elapsed):
- if riemann_available and 'riemann-server' in config:
- time_split = time_elapsed.split(':')
- time_sec = int(time_split[0]) * 3600 \
- + int(time_split[1]) * 60 + int(time_split[2])
- with QueuedClient(TCPTransport(config['riemann-server'],
- config['riemann-port'],
- timeout=30)) as client:
- client.event(service=service, description=text, metric_f=time_sec)
- client.flush()
-
-
-def elapsed(starttime):
- td = datetime.datetime.now() - starttime
- hours, remainder = divmod(int(td.total_seconds()), 60*60)
- minutes, seconds = divmod(remainder, 60)
- return "%02d:%02d:%02d" % (hours, minutes, seconds)
-
-
def spawn():
for fork in range(1, config.get('instances')):
if os.fork() == 0:
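
The chdir, timer, elapsed and log helpers deleted here move to ybd/utils.py (per the imports at the top of the other files); pulling them out of app.py is what lets app import cache and repos without a circular import. The removed timer/elapsed pair, reconstructed as a standalone sketch with print() standing in for log():

    import contextlib
    import datetime

    def elapsed(starttime):
        td = datetime.datetime.now() - starttime
        hours, remainder = divmod(int(td.total_seconds()), 60 * 60)
        minutes, seconds = divmod(remainder, 60)
        return "%02d:%02d:%02d" % (hours, minutes, seconds)

    @contextlib.contextmanager
    def timer(dn, message=''):
        starttime = datetime.datetime.now()
        print('[%s] Starting %s' % (dn, message))
        yield
        print('[%s] Elapsed time for %s:' % (dn, message), elapsed(starttime))
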
diff --git a/ybd/assembly.py b/ybd/assembly.py
index 34f278a..5f0fb41 100644
--- a/ybd/assembly.py
+++ b/ybd/assembly.py
@@ -20,21 +20,19 @@ import contextlib
import fcntl
import errno
-import app
-from app import config, timer, elapsed
-from app import log, log_riemann, lockfile, RetryException
-from cache import cache, cache_key, get_cache, get_remote
-import repos
-import sandbox
+from ybd import app, repos, sandbox, config
+from ybd.app import lockfile, RetryException
+from ybd.cache import cache, cache_key, get_cache, get_remote
import datetime
-from splitting import write_metadata, install_split_artifacts
+from ybd.splitting import write_metadata, install_split_artifacts
+from ybd.utils import log, log_riemann, elapsed, timer
def compose(dn):
'''Work through defs tree, building and assembling until target exists'''
if type(dn) is not dict:
- dn = app.defs.get(dn)
+ dn = config.defs.get(dn)
# if we can't calculate cache key, we can't create this component
if cache_key(dn) is False:
@@ -50,14 +48,15 @@ def compose(dn):
log(dn, "Composing", dn['name'], verbose=True)
# if we have a kbas, look there to see if this component exists
- if config.get('kbas-url') and not config.get('reproduce'):
+ if config.config.get('kbas-url') and not \
+ config.config.get('reproduce'):
with claim(dn):
if get_remote(dn):
- config['counter'].increment()
+ config.config['counter'].increment()
return cache_key(dn)
# we only work with user-specified arch
- if 'arch' in dn and dn['arch'] != config['arch']:
+ if 'arch' in dn and dn['arch'] != config.config['arch']:
return None
# Create composite components (strata, systems, clusters)
@@ -65,7 +64,7 @@ def compose(dn):
shuffle(systems)
for system in systems:
for s in system.get('subsystems', []):
- subsystem = app.defs.get(s['path'])
+ subsystem = config.defs.get(s['path'])
compose(subsystem)
compose(system['path'])
@@ -86,7 +85,7 @@ def install_contents(dn, contents=None):
shuffle(contents)
for it in contents:
- item = app.defs.get(it)
+ item = config.defs.get(it)
if os.path.exists(os.path.join(dn['sandbox'],
'baserock', item['name'] + '.meta')):
# content has already been installed
@@ -101,7 +100,7 @@ def install_contents(dn, contents=None):
compose(item)
sandbox.install(dn, item)
- if config.get('log-verbose'):
+ if config.config.get('log-verbose'):
log(dn, 'Added contents\n', contents)
sandbox.list_files(dn)
@@ -115,7 +114,7 @@ def install_dependencies(dn, dependencies=None):
log(dn, 'Installing dependencies\n', dependencies, verbose=True)
shuffle(dependencies)
for it in dependencies:
- dependency = app.defs.get(it)
+ dependency = config.defs.get(it)
if os.path.exists(os.path.join(dn['sandbox'], 'baserock',
dependency['name'] + '.meta')):
# dependency has already been installed
@@ -130,7 +129,7 @@ def install_dependencies(dn, dependencies=None):
if dependency.get('contents'):
install_dependencies(dn, dependency['contents'])
sandbox.install(dn, dependency)
- if config.get('log-verbose'):
+ if config.config.get('log-verbose'):
sandbox.list_files(dn)
@@ -156,12 +155,12 @@ def build(dn):
def run_build(dn):
- ''' This is where we run ./configure, make, make install (for example).
+ ''' This is where we run ./config.configure, make, make install (for example).
By the time we get here, all dependencies for component have already
been assembled.
'''
- if config.get('mode', 'normal') == 'no-build':
+ if config.config.get('mode', 'normal') == 'no-build':
log(dn, 'SKIPPING BUILD: artifact will be empty')
return
@@ -176,7 +175,7 @@ def run_build(dn):
env_vars = sandbox.env_vars_for_build(dn)
log(dn, 'Logging build commands to %s' % dn['log'])
- for build_step in app.defs.defaults.build_steps:
+ for build_step in config.defs.defaults.build_steps:
if dn.get(build_step):
log(dn, 'Running', build_step)
for command in dn.get(build_step, []):
@@ -194,7 +193,7 @@ def run_build(dn):
def shuffle(contents):
- if config.get('instances', 1) > 1:
+ if config.config.get('instances', 1) > 1:
random.seed(datetime.datetime.now())
random.shuffle(contents)
@@ -243,23 +242,24 @@ def get_build_commands(dn):
dn['install-commands'] = gather_integration_commands(dn)
return
- exit = True if config.get('check-definitions') == 'exit' else False
+ exit = True if config.config.get('check-definitions') == 'exit' else False
if 'build-system' in dn:
bs = dn['build-system']
log(dn, 'Defined build system is', bs)
else:
files = os.listdir(dn['checkout'])
- bs = app.defs.defaults.detect_build_system(files)
+ bs = config.defs.defaults.detect_build_system(files)
if bs == 'manual' and 'install-commands' not in dn:
if dn.get('kind', 'chunk') == 'chunk':
- print dn
+ print(dn)
log(dn, 'WARNING: No install-commands, manual build-system',
exit=exit)
log(dn, 'WARNING: Assumed build system is', bs)
- for build_step in app.defs.defaults.build_steps:
+ for build_step in config.defs.defaults.build_steps:
if dn.get(build_step, None) is None:
- commands = app.defs.defaults.build_systems[bs].get(build_step, [])
+ commands = config.defs.defaults.build_systems[bs].get(
+ build_step, [])
dn[build_step] = commands
@@ -271,15 +271,15 @@ def gather_integration_commands(dn):
def _gather_recursively(component, commands):
if 'system-integration' in component:
- for product, it in component['system-integration'].iteritems():
- for name, cmdseq in it.iteritems():
+ for product, it in component['system-integration'].items():
+ for name, cmdseq in it.items():
commands["%s-%s" % (name, product)] = cmdseq
for subcomponent in component.get('contents', []):
- _gather_recursively(app.defs.get(subcomponent), commands)
+ _gather_recursively(config.defs.get(subcomponent), commands)
all_commands = {}
_gather_recursively(dn, all_commands)
result = []
- for key in sorted(all_commands.keys()):
+ for key in sorted(list(all_commands.keys())):
result.extend(all_commands[key])
return result
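
gather_integration_commands swaps the removed dict.iteritems() for items() and materialises keys() before sorting. Both changes are mechanical under Python 3; a short runnable illustration with made-up command data:

    commands = {'10-ldconfig': ['ldconfig'], '00-hostname': ['hostname demo']}
    # Python 3: items() already returns a lazy view; iteritems() is gone.
    for name, cmdseq in commands.items():
        print(name, cmdseq)
    # sorted() consumes the view directly; wrapping in list() as the merge
    # does is harmless, and only required when the result must be indexed.
    for key in sorted(commands.keys()):
        print(commands[key])
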
diff --git a/ybd/cache.py b/ybd/cache.py
index 345a8b2..c66510c 100644
--- a/ybd/cache.py
+++ b/ybd/cache.py
@@ -22,9 +22,9 @@ import os
import shutil
from subprocess import call
-import app
-from repos import get_repo_url, get_tree
-import utils
+from ybd import utils, config
+from ybd.utils import log
+from ybd.repos import get_repo_url, get_tree
import tempfile
import yaml
import re
@@ -32,28 +32,28 @@ import re
def cache_key(dn):
if dn is None:
- app.log(dn, 'No definition found for', dn, exit=True)
+ log(dn, 'No definition found for', dn, exit=True)
if type(dn) is not dict:
- dn = app.defs.get(dn)
+ dn = config.defs.get(dn)
if dn.get('cache') == 'calculating':
- app.log(dn, 'Recursion loop for', dn, exit=True)
+ log(dn, 'Recursion loop for', dn, exit=True)
if dn.get('cache'):
return dn['cache']
- if dn.get('arch', app.config['arch']) != app.config['arch']:
+ if dn.get('arch', config.config['arch']) != config.config['arch']:
if 'tried' not in dn:
dn['tried'] = True
- app.log(dn, 'No cache_key for arch %s mismatch' % dn['arch'],
- app.config['arch'])
+ log(dn, 'No cache_key for arch %s mismatch' % dn['arch'],
+ config.config['arch'])
return False
dn['cache'] = 'calculating'
key = 'no-build'
- if app.config.get('mode', 'normal') in ['keys-only', 'normal']:
+ if config.config.get('mode', 'normal') in ['keys-only', 'normal']:
if dn.get('repo') and not dn.get('tree'):
dn['tree'] = get_tree(dn)
factors = hash_factors(dn)
@@ -62,39 +62,41 @@ def cache_key(dn):
dn['cache'] = dn['name'] + "." + key
- app.config['total'] += 1
+ config.config['total'] += 1
x = 'x'
if not get_cache(dn):
x = ' '
- app.config['tasks'] += 1
+ config.config['tasks'] += 1
if dn.get('kind', 'chunk') == 'chunk':
- app.config['chunks'] += 1
+ config.config['chunks'] += 1
if dn.get('kind', 'chunk') == 'stratum':
- app.config['strata'] += 1
+ config.config['strata'] += 1
if dn.get('kind', 'chunk') == 'system':
- app.config['systems'] += 1
+ config.config['systems'] += 1
- app.log('CACHE-KEYS', '[%s]' % x, dn['cache'])
- if app.config.get('manifest', False):
- update_manifest(dn, app.config['manifest'])
+ log('CACHE-KEYS', '[%s]' % x, dn['cache'])
+ if config.config.get('manifest', False):
+ update_manifest(dn, config.config['manifest'])
- if 'keys' in app.config:
- app.config['keys'] += [dn['cache']]
+ if 'keys' in config.config:
+ config.config['keys'] += [dn['cache']]
return dn['cache']
def hash_factors(dn):
- hash_factors = {'arch': app.config['arch']}
+ hash_factors = {'arch': config.config['arch']}
for factor in dn.get('build-depends', []):
hash_factors[factor] = cache_key(factor)
for factor in dn.get('contents', []):
- hash_factors[factor.keys()[0]] = cache_key(factor.keys()[0])
+ key = list(factor.keys())[0]
+ hash_factors[key] = cache_key(key)
- relevant_factors = ['tree', 'submodules'] + app.defs.defaults.build_steps
- if app.config.get('artifact-version', False) not in range(0, 6):
+ relevant_factors = ['tree', 'submodules'] + \
+ config.defs.defaults.build_steps
+ if config.config.get('artifact-version', False) not in range(0, 6):
relevant_factors += ['devices']
for factor in relevant_factors:
@@ -102,8 +104,8 @@ def hash_factors(dn):
hash_factors[factor] = dn[factor]
if dn.get('kind') == 'system':
- if app.config.get('default-splits', []) != []:
- hash_factors['splits'] = app.config.get('default-splits')
+ if config.config.get('default-splits', []) != []:
+ hash_factors['splits'] = config.config.get('default-splits')
def hash_system_recursively(system):
factor = system.get('path', 'BROKEN')
@@ -115,23 +117,24 @@ def hash_factors(dn):
for system in dn.get('systems', []):
hash_system_recursively(system)
- if app.config.get('artifact-version', False):
- hash_factors['artifact-version'] = app.config.get('artifact-version')
+ if config.config.get('artifact-version', False):
+ hash_factors['artifact-version'] = config.config['artifact-version']
- if app.config.get('artifact-version', 0) in range(0, 2):
+ if config.config.get('artifact-version', 0) in range(0, 2):
# this way, any change to any build-system invalidates all caches
hash_factors['default-build-systems'] = \
- app.defs.defaults.build_systems
+ config.defs.defaults.build_systems
else:
# this way is better - only affected components get a new key
hash_factors['default-build-systems'] = \
- app.defs.defaults.build_systems.get(dn.get('build-system',
- 'manual'))
- if (app.config.get('default-splits', []) != [] and
+ config.defs.defaults.build_systems.get(dn.get('build-system',
+ 'manual'))
+ if (config.config.get('default-splits', []) != [] and
dn.get('kind') == 'system'):
- hash_factors['default-splits'] = app.config['default-splits']
+ hash_factors['default-splits'] = \
+ config.config['default-splits']
- if app.config.get('artifact-version', 0) not in range(0, 7):
+ if config.config.get('artifact-version', 0) not in range(0, 7):
if dn.get('max-jobs'):
if dn['max-jobs'] == 1:
hash_factors['max-jobs'] = 'single'
@@ -143,9 +146,9 @@ def hash_factors(dn):
def cache(dn):
if get_cache(dn):
- app.log(dn, "Bah! I could have cached", cache_key(dn))
+ log(dn, "Bah! I could have cached", cache_key(dn))
return
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
tmpdir = tempfile.mkdtemp()
cachefile = os.path.join(tmpdir, cache_key(dn))
if dn.get('kind') == "system":
@@ -160,12 +163,13 @@ def cache(dn):
shutil.move('%s.tar.gz' % cachefile, cachefile)
unpack(dn, cachefile)
- app.config['counter'].increment()
+ config.config['counter'].increment()
- if app.config.get('kbas-password', 'insecure') != 'insecure' and \
- app.config.get('kbas-url') is not None:
- if dn.get('kind', 'chunk') in app.config.get('kbas-upload', 'chunk'):
- with app.timer(dn, 'upload'):
+ if config.config.get('kbas-password', 'insecure') != 'insecure' and \
+ config.config.get('kbas-url') is not None:
+ if dn.get('kind', 'chunk') in \
+ config.config.get('kbas-upload', 'chunk'):
+ with utils.timer(dn, 'upload'):
upload(dn)
@@ -196,56 +200,57 @@ def unpack(dn, tmpfile):
unpackdir = tmpfile + '.unpacked'
os.makedirs(unpackdir)
if call(['tar', 'xf', tmpfile, '--directory', unpackdir]):
- app.log(dn, 'Problem unpacking', tmpfile, exit=True)
+ log(dn, 'Problem unpacking', tmpfile, exit=True)
else:
with open(os.devnull, "w") as fnull:
if call(['tar', 'tvf', tmpfile], stdout=fnull, stderr=fnull):
- app.log(dn, 'Problem with tarfile', tmpfile, exit=True)
+ log(dn, 'Problem with tarfile', tmpfile, exit=True)
try:
- path = os.path.join(app.config['artifacts'], cache_key(dn))
+ path = os.path.join(config.config['artifacts'], cache_key(dn))
shutil.move(os.path.dirname(tmpfile), path)
if not os.path.isdir(path):
- app.log(dn, 'Problem creating artifact', path, exit=True)
+ log(dn, 'Problem creating artifact', path, exit=True)
size = os.path.getsize(get_cache(dn))
size = re.sub("(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % size)
checksum = md5(get_cache(dn))
- app.log(dn, 'Cached %s bytes %s as' % (size, checksum), cache_key(dn))
+ log(dn, 'Cached %s bytes %s as' % (size, checksum), cache_key(dn))
return path
except:
- app.log(dn, 'Bah! I raced on', cache_key(dn))
+ log(dn, 'Bah! I raced on', cache_key(dn))
shutil.rmtree(os.path.dirname(tmpfile))
return False
def upload(dn):
cachefile = get_cache(dn)
- url = app.config['kbas-url'] + 'upload'
+ url = config.config['kbas-url'] + 'upload'
params = {"filename": dn['cache'],
- "password": app.config['kbas-password'],
+ "password": config.config['kbas-password'],
"checksum": md5(cachefile)}
with open(cachefile, 'rb') as f:
try:
response = requests.post(url=url, data=params, files={"file": f})
if response.status_code == 201:
- app.log(dn, 'Uploaded %s to' % dn['cache'], url)
+ log(dn, 'Uploaded %s to' % dn['cache'], url)
return
if response.status_code == 777:
- app.log(dn, 'Reproduced %s at' % md5(cachefile), dn['cache'])
- app.config['reproduced'].append([md5(cachefile), dn['cache']])
+ log(dn, 'Reproduced %s at' % md5(cachefile), dn['cache'])
+ config.config['reproduced'].append(
+ [md5(cachefile), dn['cache']])
return
if response.status_code == 405:
# server has different md5 for this artifact
- if dn['kind'] == 'stratum' and app.config['reproduce']:
- app.log('BIT-FOR-BIT',
- 'WARNING: reproduction failed for', dn['cache'])
- app.log(dn, 'Artifact server already has', dn['cache'])
+ if dn['kind'] == 'stratum' and config.config['reproduce']:
+ log('BIT-FOR-BIT',
+ 'WARNING: reproduction failed for', dn['cache'])
+ log(dn, 'Artifact server already has', dn['cache'])
return
- app.log(dn, 'Artifact server problem:', response.status_code)
+ log(dn, 'Artifact server problem:', response.status_code)
except:
pass
- app.log(dn, 'Failed to upload', dn['cache'])
+ log(dn, 'Failed to upload', dn['cache'])
def get_cache(dn):
@@ -254,16 +259,16 @@ def get_cache(dn):
if cache_key(dn) is False:
return False
- cachedir = os.path.join(app.config['artifacts'], cache_key(dn))
+ cachedir = os.path.join(config.config['artifacts'], cache_key(dn))
if os.path.isdir(cachedir):
call(['touch', cachedir])
artifact = os.path.join(cachedir, cache_key(dn))
unpackdir = artifact + '.unpacked'
if not os.path.isdir(unpackdir) and dn.get('kind') != 'system':
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
tmpdir = tempfile.mkdtemp()
if call(['tar', 'xf', artifact, '--directory', tmpdir]):
- app.log(dn, 'Problem unpacking', artifact)
+ log(dn, 'Problem unpacking', artifact)
return False
try:
shutil.move(tmpdir, unpackdir)
@@ -279,26 +284,27 @@ def get_cache(dn):
def get_remote(dn):
''' If a remote cached artifact exists for d, retrieve it '''
- if app.config.get('last-retry-component') == dn or dn.get('tried'):
+ if config.config.get('last-retry-component') == dn or dn.get('tried'):
return False
dn['tried'] = True # let's not keep asking for this artifact
- if dn.get('kind', 'chunk') not in app.config.get('kbas-upload', 'chunk'):
+ if dn.get('kind', 'chunk') not in \
+ config.config.get('kbas-upload', 'chunk'):
return False
try:
- app.log(dn, 'Try downloading', cache_key(dn))
- url = app.config['kbas-url'] + 'get/' + cache_key(dn)
+ log(dn, 'Try downloading', cache_key(dn))
+ url = config.config['kbas-url'] + 'get/' + cache_key(dn)
response = requests.get(url=url, stream=True)
except:
- app.config.pop('kbas-url')
- app.log(dn, 'WARNING: remote artifact server is not working')
+ config.config.pop('kbas-url')
+ log(dn, 'WARNING: remote artifact server is not working')
return False
if response.status_code == 200:
try:
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
tmpdir = tempfile.mkdtemp()
cachefile = os.path.join(tmpdir, cache_key(dn))
with open(cachefile, 'wb') as f:
@@ -307,13 +313,13 @@ def get_remote(dn):
return unpack(dn, cachefile)
except:
- app.log(dn, 'WARNING: failed downloading', cache_key(dn))
+ log(dn, 'WARNING: failed downloading', cache_key(dn))
return False
def cull(artifact_dir):
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
deleted = 0
def clear(deleted, artifact_dir):
@@ -321,16 +327,15 @@ def cull(artifact_dir):
for artifact in artifacts:
stat = os.statvfs(artifact_dir)
free = stat.f_frsize * stat.f_bavail / 1000000000
- if free >= app.config.get('min-gigabytes', 10):
- app.log('SETUP', '%sGB is enough free space' % free)
+ if free >= config.config.get('min-gigabytes', 10):
+ log('SETUP', '%sGB is enough free space' % free)
if deleted > 0:
- app.log('SETUP', 'Culled %s items in' % deleted,
- artifact_dir)
+ log('SETUP', 'Culled %s items in' % deleted, artifact_dir)
return True
path = os.path.join(artifact_dir, artifact)
if os.path.exists(os.path.join(path, artifact + '.unpacked')):
path = os.path.join(path, artifact + '.unpacked')
- if os.path.exists(path) and artifact not in app.config['keys']:
+ if os.path.exists(path) and artifact not in config.config['keys']:
tmpdir = tempfile.mkdtemp()
shutil.move(path, os.path.join(tmpdir, 'to-delete'))
app.remove_dir(tmpdir)
@@ -347,14 +352,14 @@ def cull(artifact_dir):
stat = os.statvfs(artifact_dir)
free = stat.f_frsize * stat.f_bavail / 1000000000
- if free < app.config.get('min-gigabytes', 10):
- app.log('SETUP', '%sGB is less than min-gigabytes:' % free,
- app.config.get('min-gigabytes', 10), exit=True)
+ if free < config.config.get('min-gigabytes', 10):
+ log('SETUP', '%sGB is less than min-gigabytes:' % free,
+ config.config.get('min-gigabytes', 10), exit=True)
def check(artifact):
try:
- artifact = os.path.join(app.config['artifact-dir'], artifact,
+ artifact = os.path.join(config.config['artifact-dir'], artifact,
artifact)
checkfile = artifact + '.md5'
if not os.path.exists(checkfile):
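
hash_factors above hits the one Python 2 idiom that breaks outright rather than just changing spelling: dict views cannot be indexed, so factor.keys()[0] must become list(factor.keys())[0]. For example:

    factor = {'linux': 'abc123'}                 # illustrative content entry
    # Python 2: keys() returned a list, so factor.keys()[0] worked.
    # Python 3: dict_keys is a view; indexing it raises TypeError.
    key = list(factor.keys())[0]
    print(key)                                   # -> linux
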
diff --git a/ybd/concourse.py b/ybd/concourse.py
index 4d5a201..5c6b577 100755
--- a/ybd/concourse.py
+++ b/ybd/concourse.py
@@ -15,8 +15,8 @@
# =*= License: GPL-2 =*=
import yaml
-import app
-from app import log, timer, defs
+import ybd.app
+from ybd.utils import log, timer
# Concourse data model:
# a 'resource' is an input line into a box
@@ -35,18 +35,18 @@ class Pipeline(object):
'image': 'docker:///devcurmudgeon/foo'}
self.write_pipeline(dn)
- output = app.defs.get(dn)['name'] + '.yml'
+ output = config.defs.get(dn)['name'] + '.yml'
with open(output, 'w') as f:
pipeline = {'resources': self.resources, 'jobs': self.jobs}
f.write(yaml.dump(pipeline, default_flow_style=False))
log('CONCOURSE', 'pipeline is at', output)
def write_pipeline(self, dn):
- dn = app.defs.get(dn)
+ dn = config.defs.get(dn)
self.add_resource(dn)
aggregate = []
for it in dn.get('build-depends', []) + dn.get('contents', []):
- component = app.defs.get(it)
+ component = config.defs.get(it)
self.add_resource(component)
if component.get('kind', 'chunk') == 'chunk':
aggregate += [{'get': component['name']}]
diff --git a/ybd/config.py b/ybd/config.py
new file mode 100644
index 0000000..fc8f3a7
--- /dev/null
+++ b/ybd/config.py
@@ -0,0 +1,2 @@
+config = {}
+defs = {}
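
This two-line module replaces the config and defs globals deleted from app.py. Because it imports nothing, every other module can import it freely, which breaks the app / cache import cycle that Python 3's absolute imports would otherwise expose. A sketch of how consumers share it, inferred from the imports elsewhere in this merge:

    # ybd/config.py in full:
    config = {}
    defs = {}

    # A consumer mutates the shared dict in place, e.g. ybd/cache.py:
    #     from ybd import config
    #     config.config['total'] += 1
    # Because the names are only ever mutated, never rebound,
    # "from ybd.config import config" (as ybd/morphs.py does) observes
    # the same dict object.
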
diff --git a/ybd/defaults.py b/ybd/defaults.py
index a8f2184..2cb4f44 100644
--- a/ybd/defaults.py
+++ b/ybd/defaults.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (C) 2015-2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
@@ -25,7 +25,8 @@ These definitions shall be used if no DEFAULTS file is present.
'''
import os
-import app
+from ybd import app, config
+from ybd.utils import log
import yaml
@@ -42,7 +43,7 @@ class Defaults(object):
def _load_defaults(self, defaults_file='./DEFAULTS'):
'''Get defaults, either from a DEFAULTS file, or built-in defaults.'''
ybd_defaults_file = os.path.join(os.path.dirname(__file__),
- app.config['defaults'])
+ config.config['defaults'])
ybd_defaults = self._load(ybd_defaults_file, ignore_errors=True)
defaults = self._load(defaults_file, ignore_errors=True)
@@ -61,7 +62,7 @@ class Defaults(object):
contents = yaml.safe_load(f)
except:
if ignore_errors:
- app.log('DEFAULTS', 'WARNING: problem loading', path)
+ log('DEFAULTS', 'WARNING: problem loading', path)
return None
else:
raise
diff --git a/ybd/deployment.py b/ybd/deployment.py
index a13d936..72103fa 100644
--- a/ybd/deployment.py
+++ b/ybd/deployment.py
@@ -17,17 +17,16 @@
import os
from subprocess import call
import json
-import app
-import cache
-import sandbox
+from ybd import app, cache, config, sandbox, utils
+from ybd.utils import log
def deploy(target):
'''Deploy a cluster definition.'''
- arch = app.config['arch']
+ arch = config.config['arch']
for system in target.get('systems', []):
- if app.defs.get(system).get('arch', arch) == arch:
- with app.timer(system, 'deployment'):
+ if config.defs.get(system).get('arch', arch) == arch:
+ with utils.timer(system, 'deployment'):
deploy_system(system)
@@ -41,14 +40,14 @@ def deploy_system(system_spec, parent_location=''):
the result being used as the location for the deployment extensions.
'''
- system = app.defs.get(system_spec['path'])
+ system = config.defs.get(system_spec['path'])
if not cache.get_cache(system):
- app.log('DEPLOY', 'System is not built, cannot deploy:\n', system,
- exit=True)
+ log('DEPLOY', 'System is not built, cannot deploy:\n', system,
+ exit=True)
deploy_defaults = system_spec.get('deploy-defaults')
with sandbox.setup(system):
- app.log(system, 'Extracting system artifact into', system['sandbox'])
+ log(system, 'Extracting system artifact into', system['sandbox'])
with open(cache.get_cache(system), 'r') as artifact:
call(['tar', 'x', '--directory', system['sandbox']],
stdin=artifact)
@@ -58,7 +57,7 @@ def deploy_system(system_spec, parent_location=''):
subsystem = dict(deploy_defaults.items() + subsystem.items())
deploy_system(subsystem, parent_location=system['sandbox'])
- for name, deployment in system_spec.get('deploy', {}).iteritems():
+ for name, deployment in system_spec.get('deploy', {}).items():
method = deployment.get('type') or deployment.get('upgrade-type')
method = os.path.basename(method)
if deploy_defaults:
@@ -72,7 +71,7 @@ def deploy_system(system_spec, parent_location=''):
try:
sandbox.run_extension(system, deployment, 'check', method)
except KeyError:
- app.log(system, "Couldn't find a check extension for", method)
+ log(system, "Couldn't find a check extension for", method)
for ext in system.get('configuration-extensions', []):
sandbox.run_extension(system, deployment, 'configure',
@@ -82,9 +81,9 @@ def deploy_system(system_spec, parent_location=''):
def do_deployment_manifest(system, configuration):
- app.log(system, "Creating deployment manifest in", system['sandbox'])
+ log(system, "Creating deployment manifest in", system['sandbox'])
data = {'configuration': configuration}
metafile = os.path.join(system['sandbox'], 'baserock', 'deployment.meta')
- with app.chdir(system['sandbox']), open(metafile, "w") as f:
- json.dump(data, f, indent=4, sort_keys=True, encoding='unicode-escape')
+ with utils.chdir(system['sandbox']), open(metafile, "w") as f:
+ json.dump(data, f, indent=4, sort_keys=True)
f.flush()
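
do_deployment_manifest drops encoding='unicode-escape' because Python 3's json.dump() writes str to a text-mode file and no longer accepts an encoding argument. The surviving call, runnable standalone with an illustrative payload:

    import json

    data = {'configuration': {'hostname': 'demo'}}      # illustrative payload
    with open('deployment.meta', 'w') as f:
        # Python 3: json.dump() has no "encoding" keyword; the text file's
        # own encoding applies, so the argument simply goes away.
        json.dump(data, f, indent=4, sort_keys=True)
        f.flush()
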
diff --git a/ybd/morphs.py b/ybd/morphs.py
index 5796d15..c108377 100644
--- a/ybd/morphs.py
+++ b/ybd/morphs.py
@@ -17,8 +17,9 @@
import yaml
import glob
import os
-from app import chdir, config, log
-from defaults import Defaults
+from ybd.config import config
+from ybd.defaults import Defaults
+from ybd.utils import chdir, log
class Morphs(object):
@@ -58,7 +59,7 @@ class Morphs(object):
with open(path) as f:
text = f.read()
contents = yaml.safe_load(text)
- except yaml.YAMLError, exc:
+ except yaml.YAMLError as exc:
log('DEFINITIONS', 'Could not parse %s' % path, exc, exit=True)
if type(contents) is not dict:
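
The except clause above is the other syntax-level change in this merge: `except yaml.YAMLError, exc` is a SyntaxError under Python 3. A minimal demonstration of the surviving form, assuming PyYAML is installed (as ybd already requires):

    import yaml

    try:
        yaml.safe_load('{unbalanced')          # deliberately malformed YAML
    except yaml.YAMLError as exc:              # the only spelling Python 3 parses
        print('Could not parse:', exc)
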
diff --git a/ybd/pots.py b/ybd/pots.py
index cbf494e..7e1ad09 100644
--- a/ybd/pots.py
+++ b/ybd/pots.py
@@ -16,9 +16,11 @@
import os
import yaml
-from app import config, log
-from defaults import Defaults
-from morphs import Morphs
+from ybd import app
+from ybd.app import config
+from ybd.defaults import Defaults
+from ybd.morphs import Morphs
+from ybd.utils import log
# copied from http://stackoverflow.com/questions/21016220
@@ -52,7 +54,7 @@ class Pots(object):
return self._data.get(dn)
log(dn, 'Unable to find definition for', dn, exit=True)
- return self._data.get(dn.get('path', dn.keys()[0]))
+ return self._data.get(dn.get('path', list(dn.keys())[0]))
def _save_pots(self, filename):
with open(filename, 'w') as f:
diff --git a/ybd/release_note.py b/ybd/release_note.py
index 19c0e65..53a4a90 100644
--- a/ybd/release_note.py
+++ b/ybd/release_note.py
@@ -17,10 +17,12 @@
import os
from subprocess import check_output
import tempfile
-import app
-from app import chdir, config, log
-from morphs import Morphs
-from repos import explore, get_last_tag, get_repo_name, mirror, mirror_has_ref
+from ybd import app
+from ybd.config import config
+from ybd.morphs import Morphs
+from ybd.repos import explore, get_last_tag, get_repo_name
+from ybd.repos import mirror, mirror_has_ref
+from ybd.utils import log, chdir
def do_release_note(release_note):
diff --git a/ybd/repos.py b/ybd/repos.py
index c52adcc..c8eedf8 100644
--- a/ybd/repos.py
+++ b/ybd/repos.py
@@ -22,8 +22,8 @@ import string
from subprocess import call, check_output
import sys
import requests
-import app
-import utils
+from ybd import utils, config
+from ybd.utils import log
import tempfile
@@ -38,7 +38,7 @@ else:
def get_repo_url(repo):
if repo:
- for alias, url in app.config.get('aliases', {}).items():
+ for alias, url in config.config.get('aliases', {}).items():
repo = repo.replace(alias, url)
if repo[:4] == "http" and not repo.endswith('.git'):
repo = repo + '.git'
@@ -63,7 +63,7 @@ def get_repo_name(repo):
def get_version(gitdir, ref='HEAD'):
try:
- with app.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
version = check_output(['git', 'describe', '--tags', '--dirty'],
stderr=fnull)[0:-1]
tag = check_output(['git', 'describe', '--abbrev=0',
@@ -79,7 +79,7 @@ def get_version(gitdir, ref='HEAD'):
def get_last_tag(gitdir):
try:
- with app.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
tag = check_output(['git', 'describe', '--abbrev=0',
'--tags', 'HEAD'], stderr=fnull)[0:-1]
return tag
@@ -89,28 +89,28 @@ def get_last_tag(gitdir):
def get_tree(dn):
ref = str(dn['ref'])
- gitdir = os.path.join(app.config['gits'], get_repo_name(dn['repo']))
+ gitdir = os.path.join(config.config['gits'], get_repo_name(dn['repo']))
if dn['repo'].startswith('file://') or dn['repo'].startswith('/'):
gitdir = dn['repo'].replace('file://', '')
if not os.path.isdir(gitdir):
- app.log(dn, 'Git repo not found:', dn['repo'], exit=True)
+ log(dn, 'Git repo not found:', dn['repo'], exit=True)
if not os.path.exists(gitdir):
try:
params = {'repo': get_repo_url(dn['repo']), 'ref': ref}
- r = requests.get(url=app.config['tree-server'], params=params)
+ r = requests.get(url=config.config['tree-server'], params=params)
return r.json()['tree']
except:
- if app.config.get('tree-server'):
- app.log(dn, 'WARNING: no tree from tree-server for', ref)
+ if config.config.get('tree-server'):
+ log(dn, 'WARNING: no tree from tree-server for', ref)
mirror(dn['name'], dn['repo'])
- with app.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,
stderr=fnull):
# can't resolve ref. is it upstream?
- app.log(dn, 'Fetching from upstream to resolve %s' % ref)
+ log(dn, 'Fetching from upstream to resolve %s' % ref)
update_mirror(dn['name'], dn['repo'], gitdir)
try:
@@ -121,66 +121,66 @@ def get_tree(dn):
except:
# either we don't have a git dir, or ref is not unique
# or ref does not exist
- app.log(dn, 'No tree for ref', (ref, gitdir), exit=True)
+ log(dn, 'No tree for ref', (ref, gitdir), exit=True)
def mirror(name, repo):
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
tmpdir = tempfile.mkdtemp()
repo_url = get_repo_url(repo)
try:
tar_file = get_repo_name(repo_url) + '.tar'
- app.log(name, 'Try fetching tarball %s' % tar_file)
+ log(name, 'Try fetching tarball %s' % tar_file)
# try tarball first
- with app.chdir(tmpdir), open(os.devnull, "w") as fnull:
- call(['wget', os.path.join(app.config['tar-url'], tar_file)],
+ with utils.chdir(tmpdir), open(os.devnull, "w") as fnull:
+ call(['wget', os.path.join(config.config['tar-url'], tar_file)],
stdout=fnull, stderr=fnull)
call(['tar', 'xf', tar_file], stderr=fnull)
call(['git', 'config', 'gc.autodetach', 'false'], stderr=fnull)
os.remove(tar_file)
update_mirror(name, repo, tmpdir)
except:
- app.log(name, 'Try git clone from', repo_url)
+ log(name, 'Try git clone from', repo_url)
with open(os.devnull, "w") as fnull:
if call(['git', 'clone', '--mirror', '-n', repo_url, tmpdir]):
- app.log(name, 'Failed to clone', repo, exit=True)
+ log(name, 'Failed to clone', repo, exit=True)
- with app.chdir(tmpdir):
+ with utils.chdir(tmpdir):
if call(['git', 'rev-parse']):
- app.log(name, 'Problem mirroring git repo at', tmpdir, exit=True)
+ log(name, 'Problem mirroring git repo at', tmpdir, exit=True)
- gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
+ gitdir = os.path.join(config.config['gits'], get_repo_name(repo))
try:
shutil.move(tmpdir, gitdir)
- app.log(name, 'Git repo is mirrored at', gitdir)
+ log(name, 'Git repo is mirrored at', gitdir)
except:
pass
def fetch(repo):
- with app.chdir(repo), open(os.devnull, "w") as fnull:
+ with utils.chdir(repo), open(os.devnull, "w") as fnull:
call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
def mirror_has_ref(gitdir, ref):
- with app.chdir(gitdir), open(os.devnull, "w") as fnull:
+ with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
out = call(['git', 'cat-file', '-t', ref], stdout=fnull, stderr=fnull)
return out == 0
def update_mirror(name, repo, gitdir):
- with app.chdir(gitdir), open(os.devnull, "w") as fnull:
- app.log(name, 'Refreshing mirror for %s' % repo)
+ with utils.chdir(gitdir), open(os.devnull, "w") as fnull:
+ log(name, 'Refreshing mirror for %s' % repo)
repo_url = get_repo_url(repo)
if call(['git', 'fetch', repo_url, '+refs/*:refs/*', '--prune'],
stdout=fnull, stderr=fnull):
- app.log(name, 'Git update mirror failed', repo, exit=True)
+ log(name, 'Git update mirror failed', repo, exit=True)
def checkout(dn):
_checkout(dn['name'], dn['repo'], dn['ref'], dn['checkout'])
- with app.chdir(dn['checkout']):
+ with utils.chdir(dn['checkout']):
if os.path.exists('.gitmodules') or dn.get('submodules'):
checkout_submodules(dn)
@@ -188,7 +188,7 @@ def checkout(dn):
def _checkout(name, repo, ref, checkout):
- gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
+ gitdir = os.path.join(config.config['gits'], get_repo_name(repo))
if not os.path.exists(gitdir):
mirror(name, repo)
elif not mirror_has_ref(gitdir, ref):
@@ -202,19 +202,19 @@ def _checkout(name, repo, ref, checkout):
# removed --no-hardlinks, though.
if call(['git', 'clone', '--no-hardlinks', gitdir, checkout],
stdout=fnull, stderr=fnull):
- app.log(name, 'Git clone failed for', gitdir, exit=True)
+ log(name, 'Git clone failed for', gitdir, exit=True)
- with app.chdir(checkout):
+ with utils.chdir(checkout):
if call(['git', 'checkout', '--force', ref], stdout=fnull,
stderr=fnull):
- app.log(name, 'Git checkout failed for', ref, exit=True)
+ log(name, 'Git checkout failed for', ref, exit=True)
- app.log(name, 'Git checkout %s in %s' % (repo, checkout))
- app.log(name, 'Upstream version %s' % get_version(checkout, ref))
+ log(name, 'Git checkout %s in %s' % (repo, checkout))
+ log(name, 'Upstream version %s' % get_version(checkout, ref))
def source_date_epoch(checkout):
- with app.chdir(checkout):
+ with utils.chdir(checkout):
return check_output(['git', 'log', '-1', '--pretty=%ct'])[:-1]
@@ -224,7 +224,7 @@ def extract_commit(name, repo, ref, target_dir):
function is much quicker when you don't need to copy the whole repo into
target_dir.
'''
- gitdir = os.path.join(app.config['gits'], get_repo_name(repo))
+ gitdir = os.path.join(config.config['gits'], get_repo_name(repo))
if not os.path.exists(gitdir):
mirror(name, repo)
elif not mirror_has_ref(gitdir, ref):
@@ -235,19 +235,19 @@ def extract_commit(name, repo, ref, target_dir):
git_env['GIT_INDEX_FILE'] = git_index_file.name
git_env['GIT_WORK_TREE'] = target_dir
- app.log(name, 'Extracting commit', ref)
+ log(name, 'Extracting commit', ref)
if call(['git', 'read-tree', ref], env=git_env, cwd=gitdir):
- app.log(name, 'git read-tree failed for', ref, exit=True)
- app.log(name, 'Then checkout index', ref)
+ log(name, 'git read-tree failed for', ref, exit=True)
+ log(name, 'Then checkout index', ref)
if call(['git', 'checkout-index', '--all'], env=git_env, cwd=gitdir):
- app.log(name, 'Git checkout-index failed for', ref, exit=True)
- app.log(name, 'Done', ref)
+ log(name, 'Git checkout-index failed for', ref, exit=True)
+ log(name, 'Done', ref)
utils.set_mtime_recursively(target_dir)
def checkout_submodules(dn):
- app.log(dn, 'Checking git submodules')
+ log(dn, 'Checking git submodules')
with open('.gitmodules', "r") as gitfile:
# drop indentation in sections, as RawConfigParser cannot handle it
content = '\n'.join([l.strip() for l in gitfile.read().splitlines()])
@@ -261,34 +261,28 @@ def checkout_submodules(dn):
path = parser.get(section, 'path')
try:
url = dn['submodules'][path]['url']
- app.log(dn, 'Processing submodule %s from' % path, url)
+ log(dn, 'Processing submodule %s from' % path, url)
except:
url = parser.get(section, 'url')
- app.log(dn, 'WARNING: fallback to submodule %s from' % path, url)
+ log(dn, 'WARNING: fallback to submodule %s from' % path, url)
- try:
- # list objects in the parent repo tree to find the commit
- # object that corresponds to the submodule
- commit = check_output(['git', 'ls-tree', dn['ref'], path])
-
- # read the commit hash from the output
- fields = commit.split()
- if len(fields) >= 2 and fields[1] == 'commit':
- submodule_commit = commit.split()[2]
+ # list objects in the parent repo tree to find the commit
+ # object that corresponds to the submodule
+ commit = check_output(['git', 'ls-tree', dn['ref'], path]).split()
- # fail if the commit hash is invalid
- if len(submodule_commit) != 40:
- raise Exception
+ # read the commit hash from the output
+ fields = list(map(lambda x: x.decode('unicode-escape'), commit))
+ if len(fields) >= 2 and fields[1] == 'commit':
+ submodule_commit = fields[2]
- fulldir = os.path.join(os.getcwd(), path)
- _checkout(dn['name'], url, submodule_commit, fulldir)
+ # fail if the commit hash is invalid
+ if len(submodule_commit) != 40:
+ raise Exception
- else:
- app.log(dn, 'Skipping submodule %s, not a commit:' % path,
- fields)
-
- except:
- app.log(dn, "Git submodules problem", exit=True)
+ fulldir = os.path.join(os.getcwd(), path)
+ _checkout(dn['name'], url, submodule_commit, fulldir)
+ else:
+ app.log(dn, 'Skipping submodule %s, not a commit:' % path, fields)
@contextlib.contextmanager
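
The rewritten submodule loop decodes check_output()'s result field by field because Python 3 returns bytes, not str. A compact sketch of the same handling, assuming it runs inside a git checkout that contains a submodule at the given path:

    from subprocess import check_output

    # Python 3: check_output() returns bytes, and bytes.split() yields bytes.
    out = check_output(['git', 'ls-tree', 'HEAD', 'some/path'])
    fields = [x.decode('unicode-escape') for x in out.split()]  # as the merge does
    if len(fields) >= 2 and fields[1] == 'commit':
        submodule_commit = fields[2]           # 40-character object name
        print('submodule pinned at', submodule_commit)
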
diff --git a/ybd/sandbox.py b/ybd/sandbox.py
index cfaed2d..6165149 100644
--- a/ybd/sandbox.py
+++ b/ybd/sandbox.py
@@ -24,11 +24,9 @@ import stat
import tempfile
from subprocess import call, PIPE
-import app
-import cache
-import utils
-from repos import get_repo_url
-
+from ybd import app, cache, utils, config
+from ybd.repos import get_repo_url
+from ybd.utils import log
# This must be set to a sandboxlib backend before the run_sandboxed() function
# can be used.
@@ -37,20 +35,20 @@ executor = None
@contextlib.contextmanager
def setup(dn):
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
dn['sandbox'] = tempfile.mkdtemp()
- os.environ['TMPDIR'] = app.config['tmp']
- app.config['sandboxes'] += [dn['sandbox']]
+ os.environ['TMPDIR'] = config.config['tmp']
+ config.config['sandboxes'] += [dn['sandbox']]
dn['checkout'] = os.path.join(dn['sandbox'], dn['name'] + '.build')
dn['install'] = os.path.join(dn['sandbox'], dn['name'] + '.inst')
dn['baserockdir'] = os.path.join(dn['install'], 'baserock')
dn['tmp'] = os.path.join(dn['sandbox'], 'tmp')
for directory in ['checkout', 'install', 'tmp', 'baserockdir']:
os.makedirs(dn[directory])
- dn['log'] = os.path.join(app.config['artifacts'],
+ dn['log'] = os.path.join(config.config['artifacts'],
dn['cache'] + '.build-log')
- if app.config.get('instances'):
- dn['log'] += '.' + str(app.config.get('fork', 0))
+ if config.config.get('instances'):
+ dn['log'] += '.' + str(config.config.get('fork', 0))
assembly_dir = dn['sandbox']
for directory in ['dev', 'tmp']:
call(['mkdir', '-p', os.path.join(assembly_dir, directory)])
@@ -59,15 +57,16 @@ def setup(dn):
yield
except app.RetryException as e:
raise e
- except:
+ except Exception as e:
import traceback
- app.log(dn, 'ERROR: surprise exception in sandbox', '')
+ log(dn, 'ERROR: surprise exception in sandbox', '')
traceback.print_exc()
- app.log(dn, 'Sandbox debris is at', dn['sandbox'], exit=True)
+ log(dn, 'Sandbox debris is at', dn['sandbox'])
+ raise e
finally:
pass
- app.log(dn, "Removing sandbox dir", dn['sandbox'], verbose=True)
+ log(dn, "Removing sandbox dir", dn['sandbox'], verbose=True)
app.remove_dir(dn['sandbox'])
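
Note the error-handling change in setup(): the bare `except:` (which also swallowed SystemExit and KeyboardInterrupt) becomes `except Exception as e`, and instead of exiting on the spot the exception is logged and re-raised so callers can react. The shape of the pattern, reduced to a hedged sketch:

    import contextlib
    import traceback

    @contextlib.contextmanager
    def logged_errors(label):    # hypothetical name, for illustration only
        try:
            yield
        except Exception:
            traceback.print_exc()    # keep the diagnostics...
            raise                    # ...but let the caller decide what happens next
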
@@ -76,9 +75,9 @@ def install(dn, component):
if os.path.exists(os.path.join(dn['sandbox'], 'baserock',
component['name'] + '.meta')):
return
- app.log(dn, 'Sandbox: installing %s' % component['cache'], verbose=True)
+ log(dn, 'Sandbox: installing %s' % component['cache'], verbose=True)
if cache.get_cache(component) is False:
- app.log(dn, 'Unable to get cache for', component['name'], exit=True)
+ log(dn, 'Unable to get cache for', component['name'], exit=True)
unpackdir = cache.get_cache(component) + '.unpacked'
if dn.get('kind') is 'system':
utils.copy_all_files(unpackdir, dn['sandbox'])
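
One pre-existing wrinkle this hunk leaves untouched: `dn.get('kind') is 'system'` tests object identity, not equality, and only works because CPython happens to intern short string literals; later interpreters flag it (SyntaxWarning from 3.8). The robust form would be:

    if dn.get('kind') == 'system':    # equality, not identity
        utils.copy_all_files(unpackdir, dn['sandbox'])
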
@@ -95,7 +94,7 @@ def ldconfig(dn):
run_logged(dn, cmd_list)
os.environ['PATH'] = path
else:
- app.log(dn, 'No %s, not running ldconfig' % conf)
+ log(dn, 'No %s, not running ldconfig' % conf)
def argv_to_string(argv):
@@ -105,7 +104,7 @@ def argv_to_string(argv):
def run_sandboxed(dn, command, env=None, allow_parallel=False):
global executor
- app.log(dn, 'Running command:\n%s' % command)
+ log(dn, 'Running command:\n%s' % command)
with open(dn['log'], "a") as logfile:
logfile.write("# # %s\n" % command)
@@ -114,11 +113,11 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
if dn.get('build-mode') == 'bootstrap':
# bootstrap mode: builds have some access to the host system, so they
# can use the compilers etc.
- tmpdir = app.config.get("TMPDIR", "/tmp")
+ tmpdir = config.config.get("TMPDIR", "/tmp")
writable_paths = [dn['checkout'], dn['install'], tmpdir, ]
- config = dict(
+ cfg = dict(
cwd=dn['checkout'],
filesystem_root='/',
filesystem_writable_paths=writable_paths,
@@ -139,7 +138,7 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
writable_paths = [dn['name'] + '.build', dn['name'] + '.inst',
'/dev', '/proc', '/tmp', ]
- config = dict(
+ cfg = dict(
cwd=dn['name'] + '.build',
filesystem_root=dn['sandbox'],
filesystem_writable_paths=writable_paths,
@@ -154,7 +153,7 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
# the child process in a chroot, the required string-escape
# python module is already in memory and no attempt to
# lazy load it in the chroot is made.
- unused = "Some Text".encode('string-escape')
+ unused = "Some Text".encode('unicode-escape')
argv = ['sh', '-c', '-e', command]
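
The codec swap above is forced: Python 3 dropped the 'string-escape' codec entirely, and 'unicode-escape' is the closest surviving equivalent (the now slightly stale comment above still names the old codec). As that comment explains, the call exists purely to pull the codec machinery into memory before chrooting, so the result is deliberately discarded:

    # Preloads the codec module; the encoded value itself is unused.
    unused = "Some Text".encode('unicode-escape')    # == b'Some Text'
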
@@ -162,51 +161,51 @@ def run_sandboxed(dn, command, env=None, allow_parallel=False):
# Adjust config for what the backend is capable of. The user will be warned
# about any changes made.
- config = executor.degrade_config_for_capabilities(config, warn=False)
+ cfg = executor.degrade_config_for_capabilities(cfg, warn=False)
try:
if not allow_parallel:
env.pop("MAKEFLAGS", None)
- app.log_env(dn['log'], env, argv_to_string(argv))
+ utils.log_env(dn['log'], env, argv_to_string(argv))
with open(dn['log'], "a") as logfile:
exit_code = 99
try:
exit_code = executor.run_sandbox_with_redirection(
argv, stdout=logfile, stderr=sandboxlib.STDOUT,
- env=env, **config)
+ env=env, **cfg)
except:
import traceback
traceback.print_exc()
- app.log('SANDBOX', 'ERROR: in run_sandbox_with_redirection',
- exit_code)
+ log('SANDBOX', 'ERROR: in run_sandbox_with_redirection',
+ exit_code)
if exit_code != 0:
- app.log(dn, 'ERROR: command failed in directory %s:\n\n' %
+ log(dn, 'ERROR: command failed in directory %s:\n\n' %
os.getcwd(), argv_to_string(argv))
call(['tail', '-n', '200', dn['log']])
- app.log(dn, 'ERROR: log file is at', dn['log'])
- app.log(dn, 'Sandbox debris is at', dn['sandbox'], exit=True)
+ log(dn, 'ERROR: log file is at', dn['log'])
+ log(dn, 'Sandbox debris is at', dn['sandbox'], exit=True)
finally:
if cur_makeflags is not None:
env['MAKEFLAGS'] = cur_makeflags
def run_logged(dn, cmd_list):
- app.log_env(dn['log'], os.environ, argv_to_string(cmd_list))
+ utils.log_env(dn['log'], os.environ, argv_to_string(cmd_list))
with open(dn['log'], "a") as logfile:
if call(cmd_list, stdin=PIPE, stdout=logfile, stderr=logfile):
- app.log(dn, 'ERROR: command failed in directory %s:\n\n' %
+ log(dn, 'ERROR: command failed in directory %s:\n\n' %
os.getcwd(), argv_to_string(cmd_list))
call(['tail', '-n', '200', dn['log']])
- app.log(dn, 'Log file is at', dn['log'], exit=True)
+ log(dn, 'Log file is at', dn['log'], exit=True)
def run_extension(dn, deployment, step, method):
- app.log(dn, 'Running %s extension:' % step, method)
+ log(dn, 'Running %s extension:' % step, method)
extensions = utils.find_extensions()
- tempfile.tempdir = app.config['tmp']
+ tempfile.tempdir = config.config['tmp']
cmd_tmp = tempfile.NamedTemporaryFile(delete=False)
cmd_bin = extensions[step][method]
@@ -214,11 +213,11 @@ def run_extension(dn, deployment, step, method):
if 'PYTHONPATH' in os.environ:
envlist.append('PYTHONPATH=%s:%s' % (os.environ['PYTHONPATH'],
- app.config['extsdir']))
+ config.config['extsdir']))
else:
- envlist.append('PYTHONPATH=%s' % app.config['extsdir'])
+ envlist.append('PYTHONPATH=%s' % config.config['extsdir'])
- for key, value in deployment.iteritems():
+ for key, value in deployment.items():
if key.isupper():
envlist.append("%s=%s" % (key, value))
@@ -231,15 +230,16 @@ def run_extension(dn, deployment, step, method):
command.append(deployment.get('location') or
deployment.get('upgrade-location'))
- with app.chdir(app.config['defdir']):
+ with utils.chdir(config.config['defdir']):
try:
with open(cmd_bin, "r") as infh:
- shutil.copyfileobj(infh, cmd_tmp)
+ with open(cmd_tmp.name, "w") as outfh:
+ shutil.copyfileobj(infh, outfh)
cmd_tmp.close()
os.chmod(cmd_tmp.name, 0o700)
if call(command):
- app.log(dn, 'ERROR: %s extension failed:' % step, cmd_bin)
+ log(dn, 'ERROR: %s extension failed:' % step, cmd_bin)
raise SystemExit
finally:
os.remove(cmd_tmp.name)
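
Two Python 3 fixes meet in run_extension(): dict.iteritems() is gone (hence .items()), and NamedTemporaryFile() opens in binary mode by default, so copying a text script into it now goes through a second, text-mode handle on cmd_tmp.name. A self-contained sketch of that copy step (copy_extension_script is a name invented for illustration):

    import os
    import shutil
    import tempfile

    def copy_extension_script(src_path):
        cmd_tmp = tempfile.NamedTemporaryFile(delete=False)    # binary-mode handle
        with open(src_path, 'r') as infh, open(cmd_tmp.name, 'w') as outfh:
            shutil.copyfileobj(infh, outfh)    # text-mode copy, no bytes/str clash
        cmd_tmp.close()
        os.chmod(cmd_tmp.name, 0o700)    # executable, as run_extension() requires
        return cmd_tmp.name
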
@@ -247,13 +247,13 @@ def run_extension(dn, deployment, step, method):
def ccache_mounts(dn, ccache_target):
- if app.config['no-ccache'] or 'repo' not in dn:
+ if config.config['no-ccache'] or 'repo' not in dn:
mounts = []
else:
name = os.path.basename(get_repo_url(dn['repo']))
if name.endswith('.git'):
name = name[:-4]
- ccache_dir = os.path.join(app.config['ccache_dir'], name)
+ ccache_dir = os.path.join(config.config['ccache_dir'], name)
if not os.path.isdir(ccache_dir):
os.mkdir(ccache_dir)
@@ -265,7 +265,7 @@ def env_vars_for_build(dn):
env = {}
extra_path = []
- if app.config['no-ccache']:
+ if config.config['no-ccache']:
ccache_path = []
else:
ccache_path = ['/usr/lib/ccache']
@@ -274,13 +274,13 @@ def env_vars_for_build(dn):
f for f in ('/baserock/binutils.meta',
'/baserock/eglibc.meta',
'/baserock/gcc.meta') if os.path.exists(f))
- if not app.config.get('no-distcc'):
+ if not config.config.get('no-distcc'):
env['CCACHE_PREFIX'] = 'distcc'
prefixes = []
for name in dn.get('build-depends', []):
- dependency = app.defs.get(name)
+ dependency = config.defs.get(name)
prefixes.append(dependency.get('prefix', '/usr'))
prefixes = set(prefixes)
for prefix in prefixes:
@@ -291,15 +291,16 @@ def env_vars_for_build(dn):
if dn.get('build-mode') == 'bootstrap':
rel_path = extra_path + ccache_path
full_path = [os.path.normpath(dn['sandbox'] + p) for p in rel_path]
- path = full_path + app.config['base-path']
+ path = full_path + config.config['base-path']
env['DESTDIR'] = dn.get('install')
else:
- path = extra_path + ccache_path + app.config['base-path']
+ path = extra_path + ccache_path + config.config['base-path']
env['DESTDIR'] = os.path.join('/', os.path.basename(dn.get('install')))
env['PATH'] = ':'.join(path)
env['PREFIX'] = dn.get('prefix') or '/usr'
- env['MAKEFLAGS'] = '-j%s' % (dn.get('max-jobs') or app.config['max-jobs'])
+ env['MAKEFLAGS'] = '-j%s' % (dn.get('max-jobs') or
+ config.config['max-jobs'])
env['TERM'] = 'dumb'
env['SHELL'] = '/bin/sh'
env['USER'] = env['USERNAME'] = env['LOGNAME'] = 'tomjon'
@@ -307,8 +308,8 @@ def env_vars_for_build(dn):
env['HOME'] = '/tmp'
env['TZ'] = 'UTC'
- arch = app.config['arch']
- cpu = app.config['cpu']
+ arch = config.config['arch']
+ cpu = config.config['cpu']
abi = ''
if arch.startswith(('armv7', 'armv5')):
abi = 'eabi'
@@ -317,8 +318,8 @@ def env_vars_for_build(dn):
env['TARGET'] = cpu + '-baserock-linux-gnu' + abi
env['TARGET_STAGE1'] = cpu + '-bootstrap-linux-gnu' + abi
env['MORPH_ARCH'] = arch
- env['DEFINITIONS_REF'] = app.config['def-version']
- env['PROGRAM_REF'] = app.config['my-version']
+ env['DEFINITIONS_REF'] = config.config['def-version']
+ env['PROGRAM_REF'] = config.config['my-version']
if dn.get('SOURCE_DATE_EPOCH'):
env['SOURCE_DATE_EPOCH'] = dn['SOURCE_DATE_EPOCH']
@@ -338,18 +339,18 @@ def create_devices(dn):
raise IOError('Cannot create device node %s, '
'unrecognized device type "%s"'
% (destfile, device['type']))
- app.log(dn, "Creating device node", destfile)
+ log(dn, "Creating device node", destfile)
os.mknod(destfile, mode, os.makedev(device['major'], device['minor']))
os.chown(destfile, device['uid'], device['gid'])
def list_files(component):
- app.log(component, 'Sandbox %s contains\n' % component['sandbox'],
- os.listdir(component['sandbox']))
+ log(component, 'Sandbox %s contains\n' % component['sandbox'],
+ os.listdir(component['sandbox']))
try:
files = os.listdir(os.path.join(component['sandbox'], 'baserock'))
- app.log(component,
- 'Baserock directory contains %s items\n' % len(files),
- sorted(files))
+ log(component,
+ 'Baserock directory contains %s items\n' % len(files),
+ sorted(files))
except:
- app.log(component, 'No baserock directory in', component['sandbox'])
+ log(component, 'No baserock directory in', component['sandbox'])
diff --git a/ybd/splitting.py b/ybd/splitting.py
index 38ead98..f435b9a 100644
--- a/ybd/splitting.py
+++ b/ybd/splitting.py
@@ -14,13 +14,12 @@
#
# =*= License: GPL-2 =*=
-import app
-from app import config, log, chdir
-from cache import get_cache
+from ybd import app, config
+from ybd.cache import get_cache
import os
import re
import yaml
-from utils import copy_file_list
+from ybd.utils import chdir, copy_file_list, log
def install_split_artifacts(dn):
@@ -31,10 +30,9 @@ def install_split_artifacts(dn):
sandbox to the dn['install']
'''
-
for content in dn['contents']:
- key = content.keys()[0]
- stratum = app.defs.get(key)
+ key = list(content.keys())[0]
+ stratum = config.defs.get(key)
move_required_files(dn, stratum, content[key])
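
The one-line change in install_split_artifacts() is the classic Python 3 dict-view gotcha: keys() now returns a view object that cannot be indexed. Wrapping it in list() restores indexing; next(iter(...)) is an equivalent form that avoids building the list:

    content = {'build-essential': ['minimal']}    # illustrative single-key mapping
    key = list(content.keys())[0]                 # what the patch does
    key = next(iter(content))                     # equivalent, without the list
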
@@ -68,7 +66,7 @@ def move_required_files(dn, stratum, artifacts):
yaml.safe_dump(split_stratum_metadata, f, default_flow_style=False)
for path in stratum['contents']:
- chunk = app.defs.get(path)
+ chunk = config.defs.get(path)
if chunk.get('build-mode', 'staging') == 'bootstrap':
continue
@@ -80,7 +78,7 @@ def move_required_files(dn, stratum, artifacts):
split_metadata = {'ref': metadata.get('ref'),
'repo': metadata.get('repo'),
'products': []}
- if config.get('artifact-version', 0) not in range(0, 1):
+ if config.config.get('artifact-version', 0) not in range(0, 1):
metadata['cache'] = dn.get('cache')
for product in metadata['products']:
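
The `not in range(0, 1)` tests scattered through this file read awkwardly but are simple version thresholds: assuming artifact-version is a non-negative integer, `not in range(0, 1)` means >= 1 and `not in range(0, 2)` means >= 2. An equivalent, more direct spelling:

    if config.config.get('artifact-version', 0) >= 1:
        metadata['cache'] = dn.get('cache')
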
@@ -108,13 +106,13 @@ def move_required_files(dn, stratum, artifacts):
def check_overlaps(dn):
- if set(config['new-overlaps']) <= set(config['overlaps']):
- config['new-overlaps'] = []
+ if set(config.config['new-overlaps']) <= set(config.config['overlaps']):
+ config.config['new-overlaps'] = []
return
overlaps_found = False
- config['new-overlaps'] = list(set(config['new-overlaps']))
- for path in config['new-overlaps']:
+ config.config['new-overlaps'] = list(set(config.config['new-overlaps']))
+ for path in config.config['new-overlaps']:
log(dn, 'WARNING: overlapping path', path)
for filename in os.listdir(dn['baserockdir']):
with open(os.path.join(dn['baserockdir'], filename)) as f:
@@ -123,10 +121,11 @@ def check_overlaps(dn):
log(filename, 'WARNING: overlap at', path[1:])
overlaps_found = True
break
- if config.get('check-overlaps') == 'exit':
- log(dn, 'Overlaps found', config['new-overlaps'], exit=True)
- config['overlaps'] = list(set(config['new-overlaps'] + config['overlaps']))
- config['new-overlaps'] = []
+ if config.config.get('check-overlaps') == 'exit':
+ log(dn, 'Overlaps found', config.config['new-overlaps'], exit=True)
+ config.config['overlaps'] = list(set(config.config['new-overlaps'] +
+ config.config['overlaps']))
+ config.config['new-overlaps'] = []
def get_metadata(dn):
@@ -157,7 +156,8 @@ def compile_rules(dn):
regexps = []
splits = {}
split_rules = dn.get('products', [])
- default_rules = app.defs.defaults.get_split_rules(dn.get('kind', 'chunk'))
+ default_rules = config.defs.defaults.get_split_rules(
+ dn.get('kind', 'chunk'))
for rules in split_rules, default_rules:
for rule in rules:
regexp = re.compile('^(?:' + '|'.join(rule.get('include')) + ')$')
@@ -175,7 +175,7 @@ def write_metadata(dn):
write_chunk_metafile(dn)
elif dn.get('kind', 'chunk') == 'stratum':
write_stratum_metafiles(dn)
- if config.get('check-overlaps', 'ignore') != 'ignore':
+ if config.config.get('check-overlaps', 'ignore') != 'ignore':
check_overlaps(dn)
@@ -214,7 +214,7 @@ def write_stratum_metafiles(stratum):
rules, splits = compile_rules(stratum)
for item in stratum['contents']:
- chunk = app.defs.get(item)
+ chunk = config.defs.get(item)
if chunk.get('build-mode', 'staging') == 'bootstrap':
continue
@@ -223,10 +223,10 @@ def write_stratum_metafiles(stratum):
'repo': metadata.get('repo'),
'products': []}
- if config.get('artifact-version', 0) not in range(0, 1):
+ if config.config.get('artifact-version', 0) not in range(0, 1):
split_metadata['cache'] = metadata.get('cache')
- chunk_artifacts = app.defs.get(chunk).get('artifacts', {})
+ chunk_artifacts = config.defs.get(chunk).get('artifacts', {})
for artifact, target in chunk_artifacts.items():
splits[target].append(artifact)
@@ -254,11 +254,11 @@ def write_metafile(rules, splits, dn):
metadata['repo'] = dn.get('repo')
metadata['ref'] = dn.get('ref')
else:
- if config.get('artifact-version', 0) not in range(0, 2):
- metadata['repo'] = config['defdir']
- metadata['ref'] = config['def-version']
+ if config.config.get('artifact-version', 0) not in range(0, 2):
+ metadata['repo'] = config.config['defdir']
+ metadata['ref'] = config.config['def-version']
- if config.get('artifact-version', 0) not in range(0, 1):
+ if config.config.get('artifact-version', 0) not in range(0, 1):
metadata['cache'] = dn.get('cache')
meta = os.path.join(dn['baserockdir'], dn['name'] + '.meta')
diff --git a/ybd/utils.py b/ybd/utils.py
index 13a61c8..34791e6 100644
--- a/ybd/utils.py
+++ b/ybd/utils.py
@@ -14,16 +14,25 @@
#
# =*= License: GPL-2 =*=
+import datetime
import gzip
import tarfile
import contextlib
import os
import shutil
import stat
+import sys
from fs.osfs import OSFS
from fs.multifs import MultiFS
import calendar
-import app
+from ybd import config
+
+try:
+ from riemann_client.transport import TCPTransport
+ from riemann_client.client import QueuedClient
+ riemann_available = True
+except ImportError:
+ riemann_available = False
# The magic number for timestamps: 2011-11-11 11:11:11
default_magic_timestamp = calendar.timegm([2011, 11, 11, 11, 11, 11])
@@ -133,9 +142,16 @@ def hardlink_all_files(srcpath, destpath):
_process_tree(destpath, srcpath, destpath, os.link)
+def elapsed(starttime):
+ td = datetime.datetime.now() - starttime
+ hours, remainder = divmod(int(td.total_seconds()), 60*60)
+ minutes, seconds = divmod(remainder, 60)
+ return "%02d:%02d:%02d" % (hours, minutes, seconds)
+
+
def _process_tree(root, srcpath, destpath, actionfunc):
if os.path.lexists(destpath):
- app.log('OVERLAPS', 'WARNING: overlap at', destpath, verbose=True)
+ log('OVERLAPS', 'WARNING: overlap at', destpath, verbose=True)
file_stat = os.lstat(srcpath)
mode = file_stat.st_mode
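
The new elapsed() helper renders the time since starttime as HH:MM:SS, with hours uncapped so multi-day builds stay readable. A quick check of the arithmetic, with elapsed() in scope:

    import datetime

    start = datetime.datetime.now() - datetime.timedelta(hours=1, minutes=2, seconds=3)
    print(elapsed(start))    # '01:02:03'
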
@@ -150,10 +166,10 @@ def _process_tree(root, srcpath, destpath, actionfunc):
except:
import traceback
traceback.print_exc()
- print 'destpath is', destpath
- print 'realpath is', realpath
+ print('destpath is', destpath)
+ print('realpath is', realpath)
- app.log('UTILS', 'ERROR: file operation failed', exit=True)
+ log('UTILS', 'ERROR: file operation failed', exit=True)
if not stat.S_ISDIR(dest_stat.st_mode):
raise IOError('Destination not a directory: source has %s'
@@ -170,7 +186,7 @@ def _process_tree(root, srcpath, destpath, actionfunc):
import re
path = re.search('/.*$', re.search('tmp[^/]+/.*$',
destpath).group(0)).group(0)
- app.config['new-overlaps'] += [path]
+ config.config['new-overlaps'] += [path]
try:
os.unlink(destpath)
except:
@@ -210,9 +226,9 @@ def copy_file_list(srcpath, destpath, filelist):
'''
def _copyfun(inpath, outpath):
- with open(inpath, "r") as infh:
- with open(outpath, "w") as outfh:
- shutil.copyfileobj(infh, outfh, 1024*1024*4)
+ with open(inpath, "r", encoding='utf-8', errors='ignore') as inf:
+ with open(outpath, "w", encoding='utf-8', errors='ignore') as outf:
+ shutil.copyfileobj(inf, outf, 1024*1024*4)
shutil.copystat(inpath, outpath)
_process_list(srcpath, destpath, filelist, _copyfun)
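
Worth noting: _copyfun() now opens both files in text mode with errors='ignore', which keeps the port simple but silently drops any bytes that are not valid UTF-8. A binary-mode copy, shown here purely for contrast with what the patch does, would round-trip every file byte-for-byte:

    import shutil

    def _copyfun(inpath, outpath):    # binary-mode variant, not the patch's choice
        with open(inpath, 'rb') as inf, open(outpath, 'wb') as outf:
            shutil.copyfileobj(inf, outf, 1024 * 1024 * 4)
        shutil.copystat(inpath, outpath)
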
@@ -263,8 +279,8 @@ def _process_list(srcdir, destdir, filelist, actionfunc):
file_stat = os.lstat(srcpath)
mode = file_stat.st_mode
except UnicodeEncodeError as ue:
- app.log("UnicodeErr",
- "Couldn't get lstat info for '%s'." % srcpath)
+ log("UnicodeErr",
+ "Couldn't get lstat info for '%s'." % srcpath)
raise ue
if stat.S_ISDIR(mode):
@@ -356,7 +372,7 @@ def make_deterministic_tar_archive(base_name, root):
'''
- with app.chdir(root), open(base_name + '.tar', 'wb') as f:
+ with chdir(root), open(base_name + '.tar', 'wb') as f:
with tarfile.TarFile(mode='w', fileobj=f) as f_tar:
directories = [d[0] for d in os.walk('.')]
for d in sorted(directories):
@@ -371,8 +387,10 @@ def _find_extensions(paths):
the return dict.'''
extension_kinds = ['check', 'configure', 'write']
+ tfs = OSFS(paths[0])
efs = MultiFS()
- map(lambda x: efs.addfs(x, OSFS(x)), paths)
+ for x in paths:
+ efs.addfs(x, OSFS(x))
def get_extensions(kind):
return {os.path.splitext(x)[0]: efs.getsyspath(x)
@@ -384,7 +402,7 @@ def _find_extensions(paths):
def find_extensions():
'''Scan definitions for extensions.'''
- paths = [app.config['extsdir']]
+ paths = [config.config['extsdir']]
return _find_extensions(paths)
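
The _find_extensions() change fixes a subtle Python 3 regression: map() is now lazy, so the old one-liner `map(lambda x: efs.addfs(x, OSFS(x)), paths)` built an iterator that was never consumed, and the side effect never ran. The explicit loop forces evaluation and states the intent:

    for x in paths:
        efs.addfs(x, OSFS(x))
    # list(map(...)) would also force it, but hiding side effects in map() is unidiomatic.
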
@@ -410,3 +428,85 @@ def monkeypatch(obj, attr, new_value):
setattr(obj, attr, new_value)
yield
setattr(obj, attr, old_value)
+
+
+def log(dn, message='', data='', verbose=False, exit=False):
+ ''' Print a timestamped log. '''
+
+ if exit:
+ print('\n\n')
+ message = 'ERROR: ' + message.replace('WARNING: ', '')
+
+ if verbose is True and config.config.get('log-verbose', False) is False:
+ return
+
+ name = dn['name'] if type(dn) is dict else dn
+
+ timestamp = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S ')
+ if config.config.get('log-timings') == 'elapsed':
+ timestamp = timestamp[:9] + elapsed(config.config['start-time']) + ' '
+ if config.config.get('log-timings', 'omit') == 'omit':
+ timestamp = ''
+ progress = ''
+ if config.config.get('counter'):
+ count = config.config['counter'].get()
+ progress = '[%s/%s/%s] ' % \
+ (count, config.config['tasks'], config.config['total'])
+ entry = '%s%s[%s] %s %s\n' % (timestamp, progress, name, message, data)
+ if config.config.get('instances'):
+ entry = str(config.config.get('fork', 0)) + ' ' + entry
+
+    print(entry)
+ sys.stdout.flush()
+
+ if exit:
+ print('\n\n')
+ os._exit(1)
+
+
+def log_env(log, env, message=''):
+ with open(log, "a") as logfile:
+ for key in sorted(env):
+ msg = env[key] if 'PASSWORD' not in key else '(hidden)'
+ logfile.write('%s=%s\n' % (key, msg))
+ logfile.write(message + '\n\n')
+ logfile.flush()
+
+
+@contextlib.contextmanager
+def chdir(dirname=None):
+ currentdir = os.getcwd()
+ try:
+ if dirname is not None:
+ os.chdir(dirname)
+ yield
+ finally:
+ os.chdir(currentdir)
+
+
+@contextlib.contextmanager
+def timer(dn, message=''):
+ starttime = datetime.datetime.now()
+ log(dn, 'Starting ' + message)
+ if type(dn) is dict:
+ dn['start-time'] = starttime
+ try:
+ yield
+ except:
+ raise
+ text = '' if message == '' else ' for ' + message
+ time_elapsed = elapsed(starttime)
+ log(dn, 'Elapsed time' + text, time_elapsed)
+ log_riemann(dn, 'Timer', text, time_elapsed)
+
+
+def log_riemann(dn, service, text, time_elapsed):
+ if riemann_available and 'riemann-server' in config.config:
+ time_split = time_elapsed.split(':')
+ time_sec = int(time_split[0]) * 3600 \
+ + int(time_split[1]) * 60 + int(time_split[2])
+ with QueuedClient(TCPTransport(config.config['riemann-server'],
+ config.config['riemann-port'],
+ timeout=30)) as client:
+ client.event(service=service, description=text, metric_f=time_sec)
+ client.flush()
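
Finally, the log/timing helpers that previously lived in app.py are re-homed here. How they compose in practice, as a hedged usage sketch (do_build is a placeholder for real work):

    from ybd.utils import log, timer

    dn = {'name': 'stage1-gcc'}
    with timer(dn, 'building stage1-gcc'):    # logs start and elapsed time, and
        do_build()                            # emits a riemann event if the optional
    log(dn, 'Build complete')                 # client and server config are present
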