author     Benjamin Schubert <bschubert15@bloomberg.net>   2019-11-12 10:24:36 +0000
committer  bst-marge-bot <marge-bot@buildstream.build>     2019-11-12 13:02:30 +0000
commit     7c470b46b33257bb81d70a00cb6b2114d68b9097
tree       343fbc043c692b4036b7e4b56e9a7a9493b44bbf /tests
parent     d51a51002b2b04f20bfa3376b6fef26add26a88d
download   buildstream-7c470b46b33257bb81d70a00cb6b2114d68b9097.tar.gz
_remote: Ensure checks done in the subclasses are propagated
Currently, `BaseRemote` calls `_check()` on its subclasses, whose
contract states that errors should be returned as a string. However,
`BaseRemote` never inspects the return value of `_check()`.

This changes the contract so that subclasses raise `RemoteError`
themselves.

This also fixes the `ArtifactShare` and adds a test.
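
The contract change is easiest to see side by side. A minimal sketch, using
simplified stand-ins for the real classes (the actual `BaseRemote` and
`RemoteError` live in BuildStream's `_remote` and `_exceptions` modules;
`IncompatibleRemote` is a hypothetical subclass for illustration):

# Minimal sketch of the contract change; everything except the names
# BaseRemote and RemoteError is a hypothetical stand-in, not BuildStream API.


class RemoteError(Exception):
    pass


class BaseRemote:
    def check(self):
        # Old contract: subclasses returned an error *string*, which this
        # method silently discarded, so failures were never reported.
        #
        # New contract: subclasses raise RemoteError directly, so a failed
        # check propagates to the caller.
        self._check()

    def _check(self):
        pass


class IncompatibleRemote(BaseRemote):
    def _check(self):
        # Signal the failure by raising, not by returning a string
        raise RemoteError("remote does not advertise BuildStream capabilities")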
Diffstat (limited to 'tests')
-rw-r--r--  tests/artifactcache/capabilities.py   55
-rw-r--r--  tests/testutils/artifactshare.py     139
2 files changed, 149 insertions(+), 45 deletions(-)
diff --git a/tests/artifactcache/capabilities.py b/tests/artifactcache/capabilities.py
new file mode 100644
index 000000000..a28516aea
--- /dev/null
+++ b/tests/artifactcache/capabilities.py
@@ -0,0 +1,55 @@
+# Pylint doesn't play well with fixtures and dependency injection from pytest
+# pylint: disable=redefined-outer-name
+
+import os
+
+import pytest
+from buildstream._project import Project
+
+from buildstream import _yaml
+from buildstream.testing.runcli import cli  # pylint: disable=unused-import
+from tests.testutils import dummy_context
+
+from tests.testutils.artifactshare import create_dummy_artifact_share
+
+
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_artifact_cache_with_missing_capabilities_is_skipped(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # Set up an artifact cache.
+    with create_dummy_artifact_share() as share:
+        # Configure artifact share
+        cache_dir = os.path.join(str(tmpdir), 'cache')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            },
+            'cachedir': cache_dir
+        }
+        _yaml.roundtrip_dump(user_config, file=user_config_file)
+
+        with dummy_context(config=user_config_file) as context:
+            # Load the project
+            project = Project(project_dir, context)
+            project.ensure_fully_loaded()
+
+            # Create a local artifact cache handle
+            artifactcache = context.artifactcache
+
+            # Manually setup the CAS remote
+            artifactcache.setup_remotes(use_config=True)
+
+            assert not artifactcache.has_fetch_remotes(), \
+                "System didn't realize the artifact cache didn't support BuildStream"
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 18ecc5e3e..d96612686 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -3,10 +3,12 @@ import shutil
 import signal
 import sys
 from collections import namedtuple
-
 from contextlib import ExitStack, contextmanager
+from concurrent import futures
 from multiprocessing import Process, Queue
 
+import grpc
+
 from buildstream._cas import CASCache
 from buildstream._cas.casserver import create_server
 from buildstream._exceptions import CASError
@@ -14,40 +16,8 @@ from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution
 from buildstream._protos.buildstream.v2 import artifact_pb2
 
 
-# ArtifactShare()
-#
-# Abstract class providing scaffolding for
-# generating data to be used with various sources
-#
-# Args:
-#    directory (str): The base temp directory for the test
-#    cache_quota (int): Maximum amount of disk space to use
-#    casd (bool): Allow write access via casd
-#
-class ArtifactShare():
-
-    def __init__(self, directory, *, quota=None, casd=False, index_only=False):
-
-        # The working directory for the artifact share (in case it
-        # needs to do something outside of its backend's storage folder).
-        #
-        self.directory = os.path.abspath(directory)
-
-        # The directory the actual repo will be stored in.
-        #
-        # Unless this gets more complicated, just use this directly
-        # in tests as a remote artifact push/pull configuration
-        #
-        self.repodir = os.path.join(self.directory, 'repo')
-        os.makedirs(self.repodir)
-        self.artifactdir = os.path.join(self.repodir, 'artifacts', 'refs')
-        os.makedirs(self.artifactdir)
-
-        self.cas = CASCache(self.repodir, casd=casd)
-
-        self.quota = quota
-        self.index_only = index_only
-
+class BaseArtifactShare:
+    def __init__(self):
         q = Queue()
 
         self.process = Process(target=self.run, args=(q,))
@@ -80,14 +50,7 @@ class ArtifactShare():
                 else:
                     cleanup_on_sigterm()
 
-                server = stack.enter_context(
-                    create_server(
-                        self.repodir,
-                        quota=self.quota,
-                        enable_push=True,
-                        index_only=self.index_only,
-                    )
-                )
+                server = stack.enter_context(self._create_server())
                 port = server.add_insecure_port('localhost:0')
                 server.start()
             except Exception:
@@ -100,6 +63,80 @@ class ArtifactShare():
         # Sleep until termination by signal
         signal.pause()
 
+    # _create_server()
+    #
+    # Create the server that will be run in the process
+    #
+    def _create_server(self):
+        raise NotImplementedError()
+
+    # close():
+    #
+    # Remove the artifact share.
+    #
+    def close(self):
+        self.process.terminate()
+        self.process.join()
+
+
+# DummyArtifactShare()
+#
+# A dummy artifact share without any capabilities
+#
+class DummyArtifactShare(BaseArtifactShare):
+    @contextmanager
+    def _create_server(self):
+        max_workers = (os.cpu_count() or 1) * 5
+        server = grpc.server(futures.ThreadPoolExecutor(max_workers))
+
+        yield server
+
+
+# ArtifactShare()
+#
+# Abstract class providing scaffolding for
+# generating data to be used with various sources
+#
+# Args:
+#    directory (str): The base temp directory for the test
+#    cache_quota (int): Maximum amount of disk space to use
+#    casd (bool): Allow write access via casd
+#    enable_push (bool): Whether the share should allow pushes
+#
+class ArtifactShare(BaseArtifactShare):
+
+    def __init__(self, directory, *, quota=None, casd=False, index_only=False):
+
+        # The working directory for the artifact share (in case it
+        # needs to do something outside of its backend's storage folder).
+        #
+        self.directory = os.path.abspath(directory)
+
+        # The directory the actual repo will be stored in.
+        #
+        # Unless this gets more complicated, just use this directly
+        # in tests as a remote artifact push/pull configuration
+        #
+        self.repodir = os.path.join(self.directory, 'repo')
+        os.makedirs(self.repodir)
+        self.artifactdir = os.path.join(self.repodir, 'artifacts', 'refs')
+        os.makedirs(self.artifactdir)
+
+        self.cas = CASCache(self.repodir, casd=casd)
+
+        self.quota = quota
+        self.index_only = index_only
+
+        super().__init__()
+
+    def _create_server(self):
+        return create_server(
+            self.repodir,
+            quota=self.quota,
+            enable_push=True,
+            index_only=self.index_only,
+        )
+
     # has_object():
     #
     # Checks whether the object is present in the share
@@ -180,8 +217,7 @@ class ArtifactShare():
     # Remove the artifact share.
    #
     def close(self):
-        self.process.terminate()
-        self.process.join()
+        super().close()
 
         self.cas.release_resources()
 
@@ -213,6 +249,19 @@ def create_split_share(directory1, directory2, *, quota=None, casd=False):
         storage.close()
 
 
+# create_dummy_artifact_share()
+#
+# Create a dummy artifact share that doesn't have any capabilities
+#
+@contextmanager
+def create_dummy_artifact_share():
+    share = DummyArtifactShare()
+    try:
+        yield share
+    finally:
+        share.close()
+
+
 statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize f_bavail')
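
The refactoring above turns server creation into a template method:
`BaseArtifactShare.run()` calls `self._create_server()`, and both
`ArtifactShare` and `DummyArtifactShare` only override that hook. A further
variant then needs nothing but its own `_create_server()`. A sketch, assuming
`BaseArtifactShare` is importable from `tests/testutils/artifactshare.py` as
shown in the diff (`SingleWorkerArtifactShare` is hypothetical, not part of
this commit):

from concurrent import futures
from contextlib import contextmanager

import grpc

from tests.testutils.artifactshare import BaseArtifactShare


# Hypothetical subclass for illustration: a share whose gRPC server has a
# single worker thread and no services registered, e.g. to exercise client
# behaviour against a slow or capability-less remote.
class SingleWorkerArtifactShare(BaseArtifactShare):
    @contextmanager
    def _create_server(self):
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
        yield server

Because `stack.enter_context()` accepts any context manager, a subclass may
either return one (as `ArtifactShare._create_server()` does via
`create_server()`) or be a `@contextmanager` generator (as in
`DummyArtifactShare` and the sketch above); both plug into the same `run()`.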