Diffstat (limited to 'docker'): 56 files changed, 1602 insertions, 624 deletions
diff --git a/docker/__init__.py b/docker/__init__.py index cf732e1..46beb53 100644 --- a/docker/__init__.py +++ b/docker/__init__.py @@ -1,7 +1,9 @@ # flake8: noqa from .api import APIClient from .client import DockerClient, from_env -from .version import version, version_info +from .context import Context +from .context import ContextAPI +from .tls import TLSConfig +from .version import __version__ -__version__ = version __title__ = 'docker' diff --git a/docker/api/build.py b/docker/api/build.py index 365129a..3a1a3d9 100644 --- a/docker/api/build.py +++ b/docker/api/build.py @@ -12,7 +12,7 @@ from .. import utils log = logging.getLogger(__name__) -class BuildApiMixin(object): +class BuildApiMixin: def build(self, path=None, tag=None, quiet=False, fileobj=None, nocache=False, rm=False, timeout=None, custom_context=False, encoding=None, pull=False, @@ -76,6 +76,7 @@ class BuildApiMixin(object): forcerm (bool): Always remove intermediate containers, even after unsuccessful builds dockerfile (str): path within the build context to the Dockerfile + gzip (bool): If set to ``True``, gzip compression/encoding is used buildargs (dict): A dictionary of build arguments container_limits (dict): A dictionary of limits applied to each container created by the build process. Valid keys: @@ -132,7 +133,7 @@ class BuildApiMixin(object): for key in container_limits.keys(): if key not in constants.CONTAINER_LIMITS_KEYS: raise errors.DockerException( - 'Invalid container_limits key {0}'.format(key) + f'Invalid container_limits key {key}' ) if custom_context: @@ -150,10 +151,10 @@ class BuildApiMixin(object): dockerignore = os.path.join(path, '.dockerignore') exclude = None if os.path.exists(dockerignore): - with open(dockerignore, 'r') as f: + with open(dockerignore) as f: exclude = list(filter( lambda x: x != '' and x[0] != '#', - [l.strip() for l in f.read().splitlines()] + [line.strip() for line in f.read().splitlines()] )) dockerfile = process_dockerfile(dockerfile, path) context = utils.tar( @@ -313,7 +314,7 @@ class BuildApiMixin(object): auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {}) log.debug( - 'Sending auth config ({0})'.format( + 'Sending auth config ({})'.format( ', '.join(repr(k) for k in auth_data.keys()) ) ) @@ -344,9 +345,9 @@ def process_dockerfile(dockerfile, path): if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or os.path.relpath(abs_dockerfile, path).startswith('..')): # Dockerfile not in context - read data to insert into tar later - with open(abs_dockerfile, 'r') as df: + with open(abs_dockerfile) as df: return ( - '.dockerfile.{0:x}'.format(random.getrandbits(160)), + f'.dockerfile.{random.getrandbits(160):x}', df.read() ) diff --git a/docker/api/client.py b/docker/api/client.py index 35dc84e..7733d33 100644 --- a/docker/api/client.py +++ b/docker/api/client.py @@ -1,12 +1,25 @@ import json import struct +import urllib from functools import partial import requests import requests.exceptions -import six import websocket +from .. 
import auth +from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH, + DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS, + DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM, + MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES) +from ..errors import (DockerException, InvalidVersion, TLSParameterError, + create_api_error_from_http_exception) +from ..tls import TLSConfig +from ..transport import SSLHTTPAdapter, UnixHTTPAdapter +from ..utils import check_resource, config, update_headers, utils +from ..utils.json_stream import json_stream +from ..utils.proxy import ProxyConfig +from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter from .build import BuildApiMixin from .config import ConfigApiMixin from .container import ContainerApiMixin @@ -19,22 +32,7 @@ from .secret import SecretApiMixin from .service import ServiceApiMixin from .swarm import SwarmApiMixin from .volume import VolumeApiMixin -from .. import auth -from ..constants import ( - DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM, - DEFAULT_DOCKER_API_VERSION, MINIMUM_DOCKER_API_VERSION, - STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS_SSH, DEFAULT_NUM_POOLS -) -from ..errors import ( - DockerException, InvalidVersion, TLSParameterError, - create_api_error_from_http_exception -) -from ..tls import TLSConfig -from ..transport import SSLHTTPAdapter, UnixHTTPAdapter -from ..utils import utils, check_resource, update_headers, config -from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor -from ..utils.json_stream import json_stream -from ..utils.proxy import ProxyConfig + try: from ..transport import NpipeHTTPAdapter except ImportError: @@ -91,6 +89,11 @@ class APIClient( user_agent (str): Set a custom user agent for requests to the server. credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is made + via shelling out to the ssh client. Ensure the ssh client is + installed and configured on the host. + max_pool_size (int): The maximum number of connections + to save in the pool. 
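A minimal usage sketch for the new use_ssh_client and max_pool_size arguments documented above; the ssh address is a placeholder, and omitting version now triggers auto-negotiation against the daemon:

    import docker

    # Shell out to the installed ssh binary and cap the size of the connection pool.
    client = docker.APIClient(
        base_url='ssh://user@docker-host.example',  # placeholder address
        use_ssh_client=True,
        max_pool_size=20,
    )
    print(client.version())  # the API version was negotiated automatically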
""" __attrs__ = requests.Session.__attrs__ + ['_auth_configs', @@ -102,8 +105,9 @@ class APIClient( def __init__(self, base_url=None, version=None, timeout=DEFAULT_TIMEOUT_SECONDS, tls=False, user_agent=DEFAULT_USER_AGENT, num_pools=None, - credstore_env=None): - super(APIClient, self).__init__() + credstore_env=None, use_ssh_client=False, + max_pool_size=DEFAULT_MAX_POOL_SIZE): + super().__init__() if tls and not base_url: raise TLSParameterError( @@ -138,7 +142,8 @@ class APIClient( if base_url.startswith('http+unix://'): self._custom_adapter = UnixHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size ) self.mount('http+docker://', self._custom_adapter) self._unmount('http://', 'https://') @@ -152,7 +157,8 @@ class APIClient( ) try: self._custom_adapter = NpipeHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size ) except NameError: raise DockerException( @@ -163,7 +169,8 @@ class APIClient( elif base_url.startswith('ssh://'): try: self._custom_adapter = SSHHTTPAdapter( - base_url, timeout, pool_connections=num_pools + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size, shell_out=use_ssh_client ) except NameError: raise DockerException( @@ -183,16 +190,16 @@ class APIClient( self.base_url = base_url # version detection needs to be after unix adapter mounting - if version is None: - self._version = DEFAULT_DOCKER_API_VERSION - elif isinstance(version, six.string_types): - if version.lower() == 'auto': - self._version = self._retrieve_server_version() - else: - self._version = version + if version is None or (isinstance( + version, + str + ) and version.lower() == 'auto'): + self._version = self._retrieve_server_version() else: + self._version = version + if not isinstance(self._version, str): raise DockerException( - 'Version parameter must be a string or None. Found {0}'.format( + 'Version parameter must be a string or None. Found {}'.format( type(version).__name__ ) ) @@ -212,7 +219,7 @@ class APIClient( ) except Exception as e: raise DockerException( - 'Error while fetching server API version: {0}'.format(e) + f'Error while fetching server API version: {e}' ) def _set_request_timeout(self, kwargs): @@ -239,28 +246,28 @@ class APIClient( def _url(self, pathfmt, *args, **kwargs): for arg in args: - if not isinstance(arg, six.string_types): + if not isinstance(arg, str): raise ValueError( - 'Expected a string but found {0} ({1}) ' + 'Expected a string but found {} ({}) ' 'instead'.format(arg, type(arg)) ) - quote_f = partial(six.moves.urllib.parse.quote, safe="/:") + quote_f = partial(urllib.parse.quote, safe="/:") args = map(quote_f, args) if kwargs.get('versioned_api', True): - return '{0}/v{1}{2}'.format( + return '{}/v{}{}'.format( self.base_url, self._version, pathfmt.format(*args) ) else: - return '{0}{1}'.format(self.base_url, pathfmt.format(*args)) + return f'{self.base_url}{pathfmt.format(*args)}' def _raise_for_status(self, response): """Raises stored :class:`APIError`, if one occurred.""" try: response.raise_for_status() except requests.exceptions.HTTPError as e: - raise create_api_error_from_http_exception(e) + raise create_api_error_from_http_exception(e) from e def _result(self, response, json=False, binary=False): assert not (json and binary) @@ -277,7 +284,7 @@ class APIClient( # so we do this disgusting thing here. 
data2 = {} if data is not None and isinstance(data, dict): - for k, v in six.iteritems(data): + for k, v in iter(data.items()): if v is not None: data2[k] = v elif data is not None: @@ -313,12 +320,10 @@ class APIClient( sock = response.raw._fp.fp.raw.sock elif self.base_url.startswith('http+docker://ssh'): sock = response.raw._fp.fp.channel - elif six.PY3: + else: sock = response.raw._fp.fp.raw if self.base_url.startswith("https://"): sock = sock._sock - else: - sock = response.raw._fp.fp._sock try: # Keep a reference to the response to stop it being garbage # collected. If the response is garbage collected, it will @@ -336,8 +341,7 @@ class APIClient( if response.raw._fp.chunked: if decode: - for chunk in json_stream(self._stream_helper(response, False)): - yield chunk + yield from json_stream(self._stream_helper(response, False)) else: reader = response.raw while not reader.closed: @@ -393,8 +397,13 @@ class APIClient( def _stream_raw_result(self, response, chunk_size=1, decode=True): ''' Stream result for TTY-enabled container and raw binary data''' self._raise_for_status(response) - for out in response.iter_content(chunk_size, decode): - yield out + + # Disable timeout on the underlying socket to prevent + # Read timed out(s) for long running processes + socket = self._get_raw_response_socket(response) + self._disable_socket_timeout(socket) + + yield from response.iter_content(chunk_size, decode) def _read_from_socket(self, response, stream, tty=True, demux=False): socket = self._get_raw_response_socket(response) @@ -458,7 +467,7 @@ class APIClient( self._result(res, binary=True) self._raise_for_status(res) - sep = six.binary_type() + sep = b'' if stream: return self._multiplexed_response_stream_helper(res) else: @@ -472,7 +481,7 @@ class APIClient( def get_adapter(self, url): try: - return super(APIClient, self).get_adapter(url) + return super().get_adapter(url) except requests.exceptions.InvalidSchema as e: if self._custom_adapter: return self._custom_adapter @@ -490,7 +499,7 @@ class APIClient( Args: dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, - otherwise``$HOME/.dockercfg``) + otherwise ``$HOME/.dockercfg``) Returns: None diff --git a/docker/api/config.py b/docker/api/config.py index 93e5168..88c367e 100644 --- a/docker/api/config.py +++ b/docker/api/config.py @@ -1,13 +1,11 @@ import base64 -import six - from .. 
import utils -class ConfigApiMixin(object): +class ConfigApiMixin: @utils.minimum_version('1.30') - def create_config(self, name, data, labels=None): + def create_config(self, name, data, labels=None, templating=None): """ Create a config @@ -15,6 +13,9 @@ class ConfigApiMixin(object): name (string): Name of the config data (bytes): Config data to be stored labels (dict): A mapping of labels to assign to the config + templating (dict): dictionary containing the name of the + templating driver to be used expressed as + { name: <templating_driver_name>} Returns (dict): ID of the newly created config """ @@ -22,12 +23,12 @@ class ConfigApiMixin(object): data = data.encode('utf-8') data = base64.b64encode(data) - if six.PY3: - data = data.decode('ascii') + data = data.decode('ascii') body = { 'Data': data, 'Name': name, - 'Labels': labels + 'Labels': labels, + 'Templating': templating } url = self._url('/configs/create') diff --git a/docker/api/container.py b/docker/api/container.py index 45bd352..f600be1 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -1,7 +1,5 @@ from datetime import datetime -import six - from .. import errors from .. import utils from ..constants import DEFAULT_DATA_CHUNK_SIZE @@ -12,7 +10,7 @@ from ..types import HostConfig from ..types import NetworkingConfig -class ContainerApiMixin(object): +class ContainerApiMixin: @utils.check_resource('container') def attach(self, container, stdout=True, stderr=True, stream=False, logs=False, demux=False): @@ -225,7 +223,7 @@ class ContainerApiMixin(object): mac_address=None, labels=None, stop_signal=None, networking_config=None, healthcheck=None, stop_timeout=None, runtime=None, - use_config_proxy=True): + use_config_proxy=True, platform=None): """ Creates a container. Parameters are similar to those for the ``docker run`` command except it doesn't support the attach options (``-a``). @@ -244,9 +242,9 @@ class ContainerApiMixin(object): .. code-block:: python - container_id = cli.create_container( + container_id = client.api.create_container( 'busybox', 'ls', ports=[1111, 2222], - host_config=cli.create_host_config(port_bindings={ + host_config=client.api.create_host_config(port_bindings={ 1111: 4567, 2222: None }) @@ -258,22 +256,24 @@ class ContainerApiMixin(object): .. code-block:: python - cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)}) + client.api.create_host_config( + port_bindings={1111: ('127.0.0.1', 4567)} + ) Or without host port assignment: .. code-block:: python - cli.create_host_config(port_bindings={1111: ('127.0.0.1',)}) + client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)}) If you wish to use UDP instead of TCP (default), you need to declare ports as such in both the config and host config: .. code-block:: python - container_id = cli.create_container( + container_id = client.api.create_container( 'busybox', 'ls', ports=[(1111, 'udp'), 2222], - host_config=cli.create_host_config(port_bindings={ + host_config=client.api.create_host_config(port_bindings={ '1111/udp': 4567, 2222: None }) ) @@ -283,7 +283,7 @@ class ContainerApiMixin(object): .. code-block:: python - cli.create_host_config(port_bindings={ + client.api.create_host_config(port_bindings={ 1111: [1234, 4567] }) @@ -291,7 +291,7 @@ class ContainerApiMixin(object): .. code-block:: python - cli.create_host_config(port_bindings={ + client.api.create_host_config(port_bindings={ 1111: [ ('192.168.0.100', 1234), ('192.168.0.101', 1234) @@ -307,9 +307,9 @@ class ContainerApiMixin(object): .. 
code-block:: python - container_id = cli.create_container( + container_id = client.api.create_container( 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], - host_config=cli.create_host_config(binds={ + host_config=client.api.create_host_config(binds={ '/home/user1/': { 'bind': '/mnt/vol2', 'mode': 'rw', @@ -326,9 +326,9 @@ class ContainerApiMixin(object): .. code-block:: python - container_id = cli.create_container( + container_id = client.api.create_container( 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], - host_config=cli.create_host_config(binds=[ + host_config=client.api.create_host_config(binds=[ '/home/user1/:/mnt/vol2', '/var/www:/mnt/vol1:ro', ]) @@ -346,15 +346,15 @@ class ContainerApiMixin(object): .. code-block:: python - networking_config = docker_client.create_networking_config({ - 'network1': docker_client.create_endpoint_config( + networking_config = client.api.create_networking_config({ + 'network1': client.api.create_endpoint_config( ipv4_address='172.28.0.124', aliases=['foo', 'bar'], links=['container2'] ) }) - ctnr = docker_client.create_container( + ctnr = client.api.create_container( img, command, networking_config=networking_config ) @@ -398,6 +398,7 @@ class ContainerApiMixin(object): configuration file (``~/.docker/config.json`` by default) contains a proxy configuration, the corresponding environment variables will be set in the container being created. + platform (str): Platform in the format ``os[/arch[/variant]]``. Returns: A dictionary with an image 'Id' key and a 'Warnings' key. @@ -408,7 +409,7 @@ class ContainerApiMixin(object): :py:class:`docker.errors.APIError` If the server returns an error. """ - if isinstance(volumes, six.string_types): + if isinstance(volumes, str): volumes = [volumes, ] if isinstance(environment, dict): @@ -427,16 +428,22 @@ class ContainerApiMixin(object): stop_signal, networking_config, healthcheck, stop_timeout, runtime ) - return self.create_container_from_config(config, name) + return self.create_container_from_config(config, name, platform) def create_container_config(self, *args, **kwargs): return ContainerConfig(self._version, *args, **kwargs) - def create_container_from_config(self, config, name=None): + def create_container_from_config(self, config, name=None, platform=None): u = self._url("/containers/create") params = { 'name': name } + if platform: + if utils.version_lt(self._version, '1.41'): + raise errors.InvalidVersion( + 'platform is not supported for API version < 1.41' + ) + params['platform'] = platform res = self._post_json(u, data=config, params=params) return self._result(res, True) @@ -480,6 +487,9 @@ class ContainerApiMixin(object): For example, ``/dev/sda:/dev/xvda:rwm`` allows the container to have read-write access to the host's ``/dev/sda`` via a node named ``/dev/xvda`` inside the container. + device_requests (:py:class:`list`): Expose host resources such as + GPUs to the container, as a list of + :py:class:`docker.types.DeviceRequest` instances. dns (:py:class:`list`): Set custom DNS servers. dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file @@ -503,7 +513,7 @@ class ContainerApiMixin(object): bytes) or a string with a units identification char (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is specified without a units character, bytes are assumed as an - mem_reservation (int or str): Memory soft limit. + mem_reservation (float or str): Memory soft limit. mem_swappiness (int): Tune a container's memory swappiness behavior. 
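A hedged sketch of the new platform argument on container creation; the platform string is illustrative, and per the check added above the daemon must speak API version 1.41 or later:

    container = client.api.create_container(
        'busybox', 'ls',
        platform='linux/arm64',  # illustrative os[/arch[/variant]] value
    )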
Accepts number between 0 and 100. memswap_limit (str or int): Maximum amount of memory + swap a @@ -520,6 +530,8 @@ class ContainerApiMixin(object): - ``container:<name|id>`` Reuse another container's network stack. - ``host`` Use the host network stack. + This mode is incompatible with ``port_bindings``. + oom_kill_disable (bool): Whether to disable OOM killer. oom_score_adj (int): An integer value containing the score given to the container in order to tune OOM killer preferences. @@ -528,7 +540,8 @@ class ContainerApiMixin(object): pids_limit (int): Tune a container's pids limit. Set ``-1`` for unlimited. port_bindings (dict): See :py:meth:`create_container` - for more information. + for more information. + Imcompatible with ``host`` in ``network_mode``. privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read @@ -575,10 +588,13 @@ class ContainerApiMixin(object): Example: - >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'], - volumes_from=['nostalgic_newton']) + >>> client.api.create_host_config( + ... privileged=True, + ... cap_drop=['MKNOD'], + ... volumes_from=['nostalgic_newton'], + ... ) {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, - 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} + 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} """ if not kwargs: @@ -606,11 +622,11 @@ class ContainerApiMixin(object): Example: - >>> docker_client.create_network('network1') - >>> networking_config = docker_client.create_networking_config({ - 'network1': docker_client.create_endpoint_config() + >>> client.api.create_network('network1') + >>> networking_config = client.api.create_networking_config({ + 'network1': client.api.create_endpoint_config() }) - >>> container = docker_client.create_container( + >>> container = client.api.create_container( img, command, networking_config=networking_config ) @@ -636,13 +652,15 @@ class ContainerApiMixin(object): network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. + driver_opt (dict): A dictionary of options to provide to the + network driver. Defaults to ``None``. Returns: (dict) An endpoint config. Example: - >>> endpoint_config = client.create_endpoint_config( + >>> endpoint_config = client.api.create_endpoint_config( aliases=['web', 'app'], links={'app_db': 'db', 'another': None}, ipv4_address='132.65.0.123' @@ -694,7 +712,8 @@ class ContainerApiMixin(object): return self._stream_raw_result(res, chunk_size, False) @utils.check_resource('container') - def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): """ Retrieve a file or folder from a container in the form of a tar archive. @@ -705,6 +724,8 @@ class ContainerApiMixin(object): chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False Returns: (tuple): First element is a raw tar data stream. 
Second element is @@ -718,7 +739,7 @@ class ContainerApiMixin(object): >>> c = docker.APIClient() >>> f = open('./sh_bin.tar', 'wb') - >>> bits, stat = c.get_archive(container, '/bin/sh') + >>> bits, stat = c.api.get_archive(container, '/bin/sh') >>> print(stat) {'name': 'sh', 'size': 1075464, 'mode': 493, 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} @@ -729,8 +750,13 @@ class ContainerApiMixin(object): params = { 'path': path } + headers = { + "Accept-Encoding": "gzip, deflate" + } if encode_stream else { + "Accept-Encoding": "identity" + } url = self._url('/containers/{0}/archive', container) - res = self._get(url, params=params, stream=True) + res = self._get(url, params=params, stream=True, headers=headers) self._raise_for_status(res) encoded_stat = res.headers.get('x-docker-container-path-stat') return ( @@ -774,7 +800,7 @@ class ContainerApiMixin(object): url = self._url("/containers/{0}/kill", container) params = {} if signal is not None: - if not isinstance(signal, six.string_types): + if not isinstance(signal, str): signal = int(signal) params['signal'] = signal res = self._post(url, params=params) @@ -900,7 +926,7 @@ class ContainerApiMixin(object): .. code-block:: python - >>> cli.port('7174d6347063', 80) + >>> client.api.port('7174d6347063', 80) [{'HostIp': '0.0.0.0', 'HostPort': '80'}] """ res = self._get(self._url("/containers/{0}/json", container)) @@ -1079,10 +1105,10 @@ class ContainerApiMixin(object): Example: - >>> container = cli.create_container( + >>> container = client.api.create_container( ... image='busybox:latest', ... command='/bin/sleep 30') - >>> cli.start(container=container.get('Id')) + >>> client.api.start(container=container.get('Id')) """ if args or kwargs: raise errors.DeprecatedMethod( @@ -1120,7 +1146,7 @@ class ContainerApiMixin(object): else: if decode: raise errors.InvalidArgument( - "decode is only available in conjuction with stream=True" + "decode is only available in conjunction with stream=True" ) return self._result(self._get(url, params={'stream': False}), json=True) @@ -1206,8 +1232,8 @@ class ContainerApiMixin(object): cpu_shares (int): CPU shares (relative weight) cpuset_cpus (str): CPUs in which to allow execution cpuset_mems (str): MEMs in which to allow execution - mem_limit (int or str): Memory limit - mem_reservation (int or str): Memory soft limit + mem_limit (float or str): Memory limit + mem_reservation (float or str): Memory soft limit memswap_limit (int or str): Total memory (memory + swap), -1 to disable swap kernel_memory (int or str): Kernel memory limit diff --git a/docker/api/daemon.py b/docker/api/daemon.py index f715a13..a857213 100644 --- a/docker/api/daemon.py +++ b/docker/api/daemon.py @@ -4,7 +4,7 @@ from datetime import datetime from .. import auth, types, utils -class DaemonApiMixin(object): +class DaemonApiMixin: @utils.minimum_version('1.25') def df(self): """ @@ -109,7 +109,7 @@ class DaemonApiMixin(object): the Docker server. dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, - otherwise``$HOME/.dockercfg``) + otherwise ``$HOME/.dockercfg``) Returns: (dict): The response from the login request diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py index 4c49ac3..496308a 100644 --- a/docker/api/exec_api.py +++ b/docker/api/exec_api.py @@ -1,10 +1,8 @@ -import six - from .. import errors from .. 
import utils -class ExecApiMixin(object): +class ExecApiMixin: @utils.check_resource('container') def exec_create(self, container, cmd, stdout=True, stderr=True, stdin=False, tty=False, privileged=False, user='', @@ -45,7 +43,7 @@ class ExecApiMixin(object): 'Setting environment for exec is not supported in API < 1.25' ) - if isinstance(cmd, six.string_types): + if isinstance(cmd, str): cmd = utils.split_command(cmd) if isinstance(environment, dict): diff --git a/docker/api/image.py b/docker/api/image.py index 11c8cf7..5e1466e 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -1,15 +1,13 @@ import logging import os -import six - from .. import auth, errors, utils from ..constants import DEFAULT_DATA_CHUNK_SIZE log = logging.getLogger(__name__) -class ImageApiMixin(object): +class ImageApiMixin: @utils.check_resource('image') def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE): @@ -31,7 +29,7 @@ class ImageApiMixin(object): Example: - >>> image = cli.get_image("busybox:latest") + >>> image = client.api.get_image("busybox:latest") >>> f = open('/tmp/busybox-latest.tar', 'wb') >>> for chunk in image: >>> f.write(chunk) @@ -81,10 +79,18 @@ class ImageApiMixin(object): If the server returns an error. """ params = { - 'filter': name, 'only_ids': 1 if quiet else 0, 'all': 1 if all else 0, } + if name: + if utils.version_lt(self._version, '1.25'): + # only use "filter" on API 1.24 and under, as it is deprecated + params['filter'] = name + else: + if filters: + filters['reference'] = name + else: + filters = {'reference': name} if filters: params['filters'] = utils.convert_filters(filters) res = self._result(self._get(self._url("/images/json"), params=params), @@ -122,7 +128,7 @@ class ImageApiMixin(object): params = _import_image_params( repository, tag, image, - src=(src if isinstance(src, six.string_types) else None), + src=(src if isinstance(src, str) else None), changes=changes ) headers = {'Content-Type': 'application/tar'} @@ -131,7 +137,7 @@ class ImageApiMixin(object): return self._result( self._post(u, data=None, params=params) ) - elif isinstance(src, six.string_types): # from file path + elif isinstance(src, str): # from file path with open(src, 'rb') as f: return self._result( self._post( @@ -343,13 +349,14 @@ class ImageApiMixin(object): return self._result(self._post(url, params=params), True) def pull(self, repository, tag=None, stream=False, auth_config=None, - decode=False, platform=None): + decode=False, platform=None, all_tags=False): """ Pulls an image. Similar to the ``docker pull`` command. Args: repository (str): The repository to pull - tag (str): The tag to pull + tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it + is set to ``latest``. stream (bool): Stream the output as a generator. Make sure to consume the generator, otherwise pull might get cancelled. auth_config (dict): Override the credentials that are found in the @@ -358,6 +365,8 @@ class ImageApiMixin(object): decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags, the ``tag`` parameter is + ignored. Returns: (generator or str): The output @@ -368,7 +377,8 @@ class ImageApiMixin(object): Example: - >>> for line in cli.pull('busybox', stream=True, decode=True): + >>> resp = client.api.pull('busybox', stream=True, decode=True) + ... for line in resp: ... 
print(json.dumps(line, indent=4)) { "status": "Pulling image (latest) from busybox", @@ -382,8 +392,12 @@ class ImageApiMixin(object): } """ - if not tag: - repository, tag = utils.parse_repository_tag(repository) + repository, image_tag = utils.parse_repository_tag(repository) + tag = tag or image_tag or 'latest' + + if all_tags: + tag = None + registry, repo_name = auth.resolve_repository_name(repository) params = { @@ -443,7 +457,12 @@ class ImageApiMixin(object): If the server returns an error. Example: - >>> for line in cli.push('yourname/app', stream=True, decode=True): + >>> resp = client.api.push( + ... 'yourname/app', + ... stream=True, + ... decode=True, + ... ) + ... for line in resp: ... print(line) {'status': 'Pushing repository yourname/app (1 tags)'} {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} @@ -494,13 +513,14 @@ class ImageApiMixin(object): res = self._delete(self._url("/images/{0}", image), params=params) return self._result(res, True) - def search(self, term): + def search(self, term, limit=None): """ Search for images on Docker Hub. Similar to the ``docker search`` command. Args: term (str): A term to search for. + limit (int): The maximum number of results to return. Returns: (list of dicts): The response of the search. @@ -509,8 +529,12 @@ class ImageApiMixin(object): :py:class:`docker.errors.APIError` If the server returns an error. """ + params = {'term': term} + if limit is not None: + params['limit'] = limit + return self._result( - self._get(self._url("/images/search"), params={'term': term}), + self._get(self._url("/images/search"), params=params), True ) @@ -534,7 +558,7 @@ class ImageApiMixin(object): Example: - >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest', + >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest', force=True) """ params = { @@ -551,7 +575,7 @@ class ImageApiMixin(object): def is_file(src): try: return ( - isinstance(src, six.string_types) and + isinstance(src, str) and os.path.isfile(src) ) except TypeError: # a data string will make isfile() raise a TypeError diff --git a/docker/api/network.py b/docker/api/network.py index 1709b62..dd4e376 100644 --- a/docker/api/network.py +++ b/docker/api/network.py @@ -4,7 +4,7 @@ from ..utils import version_lt from .. import utils -class NetworkApiMixin(object): +class NetworkApiMixin: def networks(self, names=None, ids=None, filters=None): """ List networks. Similar to the ``docker network ls`` command. @@ -75,7 +75,7 @@ class NetworkApiMixin(object): Example: A network using the bridge driver: - >>> client.create_network("network1", driver="bridge") + >>> client.api.create_network("network1", driver="bridge") You can also create more advanced networks with custom IPAM configurations. For example, setting the subnet to @@ -90,7 +90,7 @@ class NetworkApiMixin(object): >>> ipam_config = docker.types.IPAMConfig( pool_configs=[ipam_pool] ) - >>> docker_client.create_network("network1", driver="bridge", + >>> client.api.create_network("network1", driver="bridge", ipam=ipam_config) """ if options is not None and not isinstance(options, dict): @@ -216,7 +216,8 @@ class NetworkApiMixin(object): def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, aliases=None, links=None, - link_local_ips=None, mac_address=None): + link_local_ips=None, driver_opt=None, + mac_address=None): """ Connect a container to a network. 
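A short sketch of the pull and search changes shown earlier in this diff; the repository and search term are arbitrary examples:

    # Pull every tag of a repository; any explicit tag argument is ignored.
    client.api.pull('busybox', all_tags=True)

    # Cap the number of Docker Hub search results.
    results = client.api.search('busybox', limit=5)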
@@ -243,6 +244,7 @@ class NetworkApiMixin(object): "EndpointConfig": self.create_endpoint_config( aliases=aliases, links=links, ipv4_address=ipv4_address, ipv6_address=ipv6_address, link_local_ips=link_local_ips, + driver_opt=driver_opt, mac_address=mac_address ), } diff --git a/docker/api/plugin.py b/docker/api/plugin.py index f6c0b13..10210c1 100644 --- a/docker/api/plugin.py +++ b/docker/api/plugin.py @@ -1,9 +1,7 @@ -import six - from .. import auth, utils -class PluginApiMixin(object): +class PluginApiMixin: @utils.minimum_version('1.25') @utils.check_resource('name') def configure_plugin(self, name, options): @@ -21,7 +19,7 @@ class PluginApiMixin(object): url = self._url('/plugins/{0}/set', name) data = options if isinstance(data, dict): - data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)] + data = [f'{k}={v}' for k, v in data.items()] res = self._post_json(url, data=data) self._raise_for_status(res) return True @@ -53,19 +51,20 @@ class PluginApiMixin(object): return True @utils.minimum_version('1.25') - def disable_plugin(self, name): + def disable_plugin(self, name, force=False): """ Disable an installed plugin. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. + force (bool): To enable the force query parameter. Returns: ``True`` if successful """ url = self._url('/plugins/{0}/disable', name) - res = self._post(url) + res = self._post(url, params={'force': force}) self._raise_for_status(res) return True diff --git a/docker/api/secret.py b/docker/api/secret.py index e57952b..cd440b9 100644 --- a/docker/api/secret.py +++ b/docker/api/secret.py @@ -1,12 +1,10 @@ import base64 -import six - from .. import errors from .. import utils -class SecretApiMixin(object): +class SecretApiMixin: @utils.minimum_version('1.25') def create_secret(self, name, data, labels=None, driver=None): """ @@ -25,8 +23,7 @@ class SecretApiMixin(object): data = data.encode('utf-8') data = base64.b64encode(data) - if six.PY3: - data = data.decode('ascii') + data = data.decode('ascii') body = { 'Data': data, 'Name': name, diff --git a/docker/api/service.py b/docker/api/service.py index e9027bf..371f541 100644 --- a/docker/api/service.py +++ b/docker/api/service.py @@ -45,7 +45,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec, if task_template is not None: if 'ForceUpdate' in task_template and utils.version_lt( version, '1.25'): - raise_version_error('force_update', '1.25') + raise_version_error('force_update', '1.25') if task_template.get('Placement'): if utils.version_lt(version, '1.30'): @@ -113,7 +113,7 @@ def _merge_task_template(current, override): return merged -class ServiceApiMixin(object): +class ServiceApiMixin: @utils.minimum_version('1.24') def create_service( self, task_template, name=None, labels=None, mode=None, diff --git a/docker/api/swarm.py b/docker/api/swarm.py index 897f08e..db40fdd 100644 --- a/docker/api/swarm.py +++ b/docker/api/swarm.py @@ -1,5 +1,5 @@ import logging -from six.moves import http_client +import http.client as http_client from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE from .. import errors from .. import types @@ -8,7 +8,7 @@ from .. 
import utils log = logging.getLogger(__name__) -class SwarmApiMixin(object): +class SwarmApiMixin: def create_swarm_spec(self, *args, **kwargs): """ @@ -58,10 +58,10 @@ class SwarmApiMixin(object): Example: - >>> spec = client.create_swarm_spec( + >>> spec = client.api.create_swarm_spec( snapshot_interval=5000, log_entries_for_slow_followers=1200 ) - >>> client.init_swarm( + >>> client.api.init_swarm( advertise_addr='eth0', listen_addr='0.0.0.0:5000', force_new_cluster=False, swarm_spec=spec ) @@ -354,8 +354,8 @@ class SwarmApiMixin(object): Example: - >>> key = client.get_unlock_key() - >>> client.unlock_node(key) + >>> key = client.api.get_unlock_key() + >>> client.unlock_swarm(key) """ if isinstance(key, dict): @@ -396,7 +396,7 @@ class SwarmApiMixin(object): 'Role': 'manager', 'Labels': {'foo': 'bar'} } - >>> client.update_node(node_id='24ifsmvkjbyhk', version=8, + >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8, node_spec=node_spec) """ diff --git a/docker/api/volume.py b/docker/api/volume.py index 900a608..98b42a1 100644 --- a/docker/api/volume.py +++ b/docker/api/volume.py @@ -2,7 +2,7 @@ from .. import errors from .. import utils -class VolumeApiMixin(object): +class VolumeApiMixin: def volumes(self, filters=None): """ List volumes currently registered by the docker daemon. Similar to the @@ -21,7 +21,7 @@ class VolumeApiMixin(object): Example: - >>> cli.volumes() + >>> client.api.volumes() {u'Volumes': [{u'Driver': u'local', u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Name': u'foobar'}, @@ -56,15 +56,18 @@ class VolumeApiMixin(object): Example: - >>> volume = cli.create_volume(name='foobar', driver='local', - driver_opts={'foo': 'bar', 'baz': 'false'}, - labels={"key": "value"}) - >>> print(volume) + >>> volume = client.api.create_volume( + ... name='foobar', + ... driver='local', + ... driver_opts={'foo': 'bar', 'baz': 'false'}, + ... labels={"key": "value"}, + ... ) + ... print(volume) {u'Driver': u'local', - u'Labels': {u'key': u'value'}, - u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', - u'Name': u'foobar', - u'Scope': u'local'} + u'Labels': {u'key': u'value'}, + u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', + u'Name': u'foobar', + u'Scope': u'local'} """ url = self._url('/volumes/create') @@ -104,7 +107,7 @@ class VolumeApiMixin(object): Example: - >>> cli.inspect_volume('foobar') + >>> client.api.inspect_volume('foobar') {u'Driver': u'local', u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Name': u'foobar'} diff --git a/docker/auth.py b/docker/auth.py index 6a07ea2..cb38855 100644 --- a/docker/auth.py +++ b/docker/auth.py @@ -2,14 +2,12 @@ import base64 import json import logging -import six - from . import credentials from . import errors from .utils import config INDEX_NAME = 'docker.io' -INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME) +INDEX_URL = f'https://index.{INDEX_NAME}/v1/' TOKEN_USERNAME = '<token>' log = logging.getLogger(__name__) @@ -18,13 +16,13 @@ log = logging.getLogger(__name__) def resolve_repository_name(repo_name): if '://' in repo_name: raise errors.InvalidRepository( - 'Repository name cannot contain a scheme ({0})'.format(repo_name) + f'Repository name cannot contain a scheme ({repo_name})' ) index_name, remote_name = split_repo_name(repo_name) if index_name[0] == '-' or index_name[-1] == '-': raise errors.InvalidRepository( - 'Invalid index name ({0}). Cannot begin or end with a' + 'Invalid index name ({}). 
Cannot begin or end with a' ' hyphen.'.format(index_name) ) return resolve_index_name(index_name), remote_name @@ -98,10 +96,10 @@ class AuthConfig(dict): """ conf = {} - for registry, entry in six.iteritems(entries): + for registry, entry in entries.items(): if not isinstance(entry, dict): log.debug( - 'Config entry for key {0} is not auth config'.format( + 'Config entry for key {} is not auth config'.format( registry ) ) @@ -111,14 +109,14 @@ class AuthConfig(dict): # keys is not formatted properly. if raise_on_error: raise errors.InvalidConfigFile( - 'Invalid configuration for registry {0}'.format( + 'Invalid configuration for registry {}'.format( registry ) ) return {} if 'identitytoken' in entry: log.debug( - 'Found an IdentityToken entry for registry {0}'.format( + 'Found an IdentityToken entry for registry {}'.format( registry ) ) @@ -132,7 +130,7 @@ class AuthConfig(dict): # a valid value in the auths config. # https://github.com/docker/compose/issues/3265 log.debug( - 'Auth data for {0} is absent. Client might be using a ' + 'Auth data for {} is absent. Client might be using a ' 'credentials store instead.'.format(registry) ) conf[registry] = {} @@ -140,7 +138,7 @@ class AuthConfig(dict): username, password = decode_auth(entry['auth']) log.debug( - 'Found entry (registry={0}, username={1})' + 'Found entry (registry={}, username={})' .format(repr(registry), repr(username)) ) @@ -170,7 +168,7 @@ class AuthConfig(dict): try: with open(config_file) as f: config_dict = json.load(f) - except (IOError, KeyError, ValueError) as e: + except (OSError, KeyError, ValueError) as e: # Likely missing new Docker config file or it's in an # unknown format, continue to attempt to read old location # and format. @@ -230,7 +228,7 @@ class AuthConfig(dict): store_name = self.get_credential_store(registry) if store_name is not None: log.debug( - 'Using credentials store "{0}"'.format(store_name) + f'Using credentials store "{store_name}"' ) cfg = self._resolve_authconfig_credstore(registry, store_name) if cfg is not None: @@ -239,15 +237,15 @@ class AuthConfig(dict): # Default to the public index server registry = resolve_index_name(registry) if registry else INDEX_NAME - log.debug("Looking for auth entry for {0}".format(repr(registry))) + log.debug(f"Looking for auth entry for {repr(registry)}") if registry in self.auths: - log.debug("Found {0}".format(repr(registry))) + log.debug(f"Found {repr(registry)}") return self.auths[registry] - for key, conf in six.iteritems(self.auths): + for key, conf in self.auths.items(): if resolve_index_name(key) == registry: - log.debug("Found {0}".format(repr(key))) + log.debug(f"Found {repr(key)}") return conf log.debug("No entry found") @@ -258,7 +256,7 @@ class AuthConfig(dict): # The ecosystem is a little schizophrenic with index.docker.io VS # docker.io - in that case, it seems the full URL is necessary. 
registry = INDEX_URL - log.debug("Looking for auth entry for {0}".format(repr(registry))) + log.debug(f"Looking for auth entry for {repr(registry)}") store = self._get_store_instance(credstore_name) try: data = store.get(registry) @@ -278,7 +276,7 @@ class AuthConfig(dict): return None except credentials.StoreError as e: raise errors.DockerException( - 'Credentials store error: {0}'.format(repr(e)) + f'Credentials store error: {repr(e)}' ) def _get_store_instance(self, name): @@ -329,7 +327,7 @@ def convert_to_hostname(url): def decode_auth(auth): - if isinstance(auth, six.string_types): + if isinstance(auth, str): auth = auth.encode('ascii') s = base64.b64decode(auth) login, pwd = s.split(b':', 1) @@ -385,7 +383,6 @@ def _load_legacy_config(config_file): }} except Exception as e: log.debug(e) - pass log.debug("All parsing attempts failed - returning empty config") return {} diff --git a/docker/client.py b/docker/client.py index 99ae196..4dbd846 100644 --- a/docker/client.py +++ b/docker/client.py @@ -1,5 +1,5 @@ from .api.client import APIClient -from .constants import DEFAULT_TIMEOUT_SECONDS +from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE) from .models.configs import ConfigCollection from .models.containers import ContainerCollection from .models.images import ImageCollection @@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection from .utils import kwargs_from_env -class DockerClient(object): +class DockerClient: """ A client for communicating with a Docker server. @@ -35,6 +35,11 @@ class DockerClient(object): user_agent (str): Set a custom user agent for requests to the server. credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is made + via shelling out to the ssh client. Ensure the ssh client is + installed and configured on the host. + max_pool_size (int): The maximum number of connections + to save in the pool. """ def __init__(self, *args, **kwargs): self.api = APIClient(*args, **kwargs) @@ -62,14 +67,19 @@ class DockerClient(object): Args: version (str): The version of the API to use. Set to ``auto`` to - automatically detect the server's version. Default: ``1.35`` + automatically detect the server's version. Default: ``auto`` timeout (int): Default timeout for API calls, in seconds. + max_pool_size (int): The maximum number of connections + to save in the pool. ssl_version (int): A valid `SSL version`_. assert_hostname (bool): Verify the hostname of the server. environment (dict): The environment to read environment variables from. Default: the value of ``os.environ`` credstore_env (dict): Override environment variables when calling the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is + made via shelling out to the ssh client. Ensure the ssh + client is installed and configured on the host. 
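A minimal sketch of docker.from_env() with the two parameters documented above, both of which are simply forwarded to APIClient:

    import docker

    client = docker.from_env(
        use_ssh_client=True,  # only takes effect when the environment points at an ssh:// daemon
        max_pool_size=20,
    )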
Example: @@ -80,9 +90,15 @@ class DockerClient(object): https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1 """ timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS) + max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE) version = kwargs.pop('version', None) + use_ssh_client = kwargs.pop('use_ssh_client', False) return cls( - timeout=timeout, version=version, **kwargs_from_env(**kwargs) + timeout=timeout, + max_pool_size=max_pool_size, + version=version, + use_ssh_client=use_ssh_client, + **kwargs_from_env(**kwargs) ) # Resources @@ -196,7 +212,7 @@ class DockerClient(object): close.__doc__ = APIClient.close.__doc__ def __getattr__(self, name): - s = ["'DockerClient' object has no attribute '{}'".format(name)] + s = [f"'DockerClient' object has no attribute '{name}'"] # If a user calls a method on APIClient, they if hasattr(APIClient, name): s.append("In Docker SDK for Python 2.0, this method is now on the " diff --git a/docker/constants.py b/docker/constants.py index 4b96e1c..ed341a9 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -1,7 +1,7 @@ import sys -from .version import version +from .version import __version__ -DEFAULT_DOCKER_API_VERSION = '1.35' +DEFAULT_DOCKER_API_VERSION = '1.41' MINIMUM_DOCKER_API_VERSION = '1.21' DEFAULT_TIMEOUT_SECONDS = 60 STREAM_HEADER_SIZE_BYTES = 8 @@ -9,6 +9,18 @@ CONTAINER_LIMITS_KEYS = [ 'memory', 'memswap', 'cpushares', 'cpusetcpus' ] +DEFAULT_HTTP_HOST = "127.0.0.1" +DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock" +DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' + +BYTE_UNITS = { + 'b': 1, + 'k': 1024, + 'm': 1024 * 1024, + 'g': 1024 * 1024 * 1024 +} + + INSECURE_REGISTRY_DEPRECATION_WARNING = \ 'The `insecure_registry` argument to {} ' \ 'is deprecated and non-functional. Please remove it.' @@ -16,7 +28,7 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \ IS_WINDOWS_PLATFORM = (sys.platform == 'win32') WINDOWS_LONGPATH_PREFIX = '\\\\?\\' -DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version) +DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}" DEFAULT_NUM_POOLS = 25 # The OpenSSH server default value for MaxSessions is 10 which means we can @@ -24,6 +36,8 @@ DEFAULT_NUM_POOLS = 25 # For more details see: https://github.com/docker/docker-py/issues/2246 DEFAULT_NUM_POOLS_SSH = 9 +DEFAULT_MAX_POOL_SIZE = 10 + DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8'] diff --git a/docker/context/__init__.py b/docker/context/__init__.py new file mode 100644 index 0000000..0a6707f --- /dev/null +++ b/docker/context/__init__.py @@ -0,0 +1,3 @@ +# flake8: noqa +from .context import Context +from .api import ContextAPI diff --git a/docker/context/api.py b/docker/context/api.py new file mode 100644 index 0000000..380e8c4 --- /dev/null +++ b/docker/context/api.py @@ -0,0 +1,203 @@ +import json +import os + +from docker import errors +from docker.context.config import get_meta_dir +from docker.context.config import METAFILE +from docker.context.config import get_current_context_name +from docker.context.config import write_context_name_to_docker_config +from docker.context import Context + + +class ContextAPI: + """Context API. + Contains methods for context management: + create, list, remove, get, inspect. + """ + DEFAULT_CONTEXT = Context("default", "swarm") + + @classmethod + def create_context( + cls, name, orchestrator=None, host=None, tls_cfg=None, + default_namespace=None, skip_tls_verify=False): + """Creates a new context. + Returns: + (Context): a Context object. 
+ Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextAlreadyExists` + If a context with the name already exists. + :py:class:`docker.errors.ContextException` + If name is default. + + Example: + + >>> from docker.context import ContextAPI + >>> ctx = ContextAPI.create_context(name='test') + >>> print(ctx.Metadata) + { + "Name": "test", + "Metadata": {}, + "Endpoints": { + "docker": { + "Host": "unix:///var/run/docker.sock", + "SkipTLSVerify": false + } + } + } + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + raise errors.ContextException( + '"default" is a reserved context name') + ctx = Context.load_context(name) + if ctx: + raise errors.ContextAlreadyExists(name) + endpoint = "docker" + if orchestrator and orchestrator != "swarm": + endpoint = orchestrator + ctx = Context(name, orchestrator) + ctx.set_endpoint( + endpoint, host, tls_cfg, + skip_tls_verify=skip_tls_verify, + def_namespace=default_namespace) + ctx.save() + return ctx + + @classmethod + def get_context(cls, name=None): + """Retrieves a context object. + Args: + name (str): The name of the context + + Example: + + >>> from docker.context import ContextAPI + >>> ctx = ContextAPI.get_context(name='test') + >>> print(ctx.Metadata) + { + "Name": "test", + "Metadata": {}, + "Endpoints": { + "docker": { + "Host": "unix:///var/run/docker.sock", + "SkipTLSVerify": false + } + } + } + """ + if not name: + name = get_current_context_name() + if name == "default": + return cls.DEFAULT_CONTEXT + return Context.load_context(name) + + @classmethod + def contexts(cls): + """Context list. + Returns: + (Context): List of context objects. + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + names = [] + for dirname, dirnames, fnames in os.walk(get_meta_dir()): + for filename in fnames + dirnames: + if filename == METAFILE: + try: + data = json.load( + open(os.path.join(dirname, filename))) + names.append(data["Name"]) + except Exception as e: + raise errors.ContextException( + "Failed to load metafile {}: {}".format( + filename, e)) + + contexts = [cls.DEFAULT_CONTEXT] + for name in names: + contexts.append(Context.load_context(name)) + return contexts + + @classmethod + def get_current_context(cls): + """Get current context. + Returns: + (Context): current context object. + """ + return cls.get_context() + + @classmethod + def set_current_context(cls, name="default"): + ctx = cls.get_context(name) + if not ctx: + raise errors.ContextNotFound(name) + + err = write_context_name_to_docker_config(name) + if err: + raise errors.ContextException( + f'Failed to set current context: {err}') + + @classmethod + def remove_context(cls, name): + """Remove a context. Similar to the ``docker context rm`` command. + + Args: + name (str): The name of the context + + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextNotFound` + If a context with the name does not exist. + :py:class:`docker.errors.ContextException` + If name is default. 
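Putting the new ContextAPI methods together in one hedged sketch; the context name and host are illustrative:

    from docker.context import ContextAPI

    ctx = ContextAPI.create_context('remote', host='ssh://user@docker-host.example')
    ContextAPI.set_current_context('remote')
    print([c.Name for c in ContextAPI.contexts()])
    ContextAPI.set_current_context('default')
    ContextAPI.remove_context('remote')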
+ + Example: + + >>> from docker.context import ContextAPI + >>> ContextAPI.remove_context(name='test') + >>> + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + raise errors.ContextException( + 'context "default" cannot be removed') + ctx = Context.load_context(name) + if not ctx: + raise errors.ContextNotFound(name) + if name == get_current_context_name(): + write_context_name_to_docker_config(None) + ctx.remove() + + @classmethod + def inspect_context(cls, name="default"): + """Remove a context. Similar to the ``docker context inspect`` command. + + Args: + name (str): The name of the context + + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextNotFound` + If a context with the name does not exist. + + Example: + + >>> from docker.context import ContextAPI + >>> ContextAPI.remove_context(name='test') + >>> + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + return cls.DEFAULT_CONTEXT() + ctx = Context.load_context(name) + if not ctx: + raise errors.ContextNotFound(name) + + return ctx() diff --git a/docker/context/config.py b/docker/context/config.py new file mode 100644 index 0000000..d761aef --- /dev/null +++ b/docker/context/config.py @@ -0,0 +1,81 @@ +import os +import json +import hashlib + +from docker import utils +from docker.constants import IS_WINDOWS_PLATFORM +from docker.constants import DEFAULT_UNIX_SOCKET +from docker.utils.config import find_config_file + +METAFILE = "meta.json" + + +def get_current_context_name(): + name = "default" + docker_cfg_path = find_config_file() + if docker_cfg_path: + try: + with open(docker_cfg_path) as f: + name = json.load(f).get("currentContext", "default") + except Exception: + return "default" + return name + + +def write_context_name_to_docker_config(name=None): + if name == 'default': + name = None + docker_cfg_path = find_config_file() + config = {} + if docker_cfg_path: + try: + with open(docker_cfg_path) as f: + config = json.load(f) + except Exception as e: + return e + current_context = config.get("currentContext", None) + if current_context and not name: + del config["currentContext"] + elif name: + config["currentContext"] = name + else: + return + try: + with open(docker_cfg_path, "w") as f: + json.dump(config, f, indent=4) + except Exception as e: + return e + + +def get_context_id(name): + return hashlib.sha256(name.encode('utf-8')).hexdigest() + + +def get_context_dir(): + return os.path.join(os.path.dirname(find_config_file() or ""), "contexts") + + +def get_meta_dir(name=None): + meta_dir = os.path.join(get_context_dir(), "meta") + if name: + return os.path.join(meta_dir, get_context_id(name)) + return meta_dir + + +def get_meta_file(name): + return os.path.join(get_meta_dir(name), METAFILE) + + +def get_tls_dir(name=None, endpoint=""): + context_dir = get_context_dir() + if name: + return os.path.join(context_dir, "tls", get_context_id(name), endpoint) + return os.path.join(context_dir, "tls") + + +def get_context_host(path=None, tls=False): + host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls) + if host == DEFAULT_UNIX_SOCKET: + # remove http+ from default docker socket url + return host.strip("http+") + return host diff --git a/docker/context/context.py b/docker/context/context.py new file mode 100644 index 0000000..dbaa01c --- /dev/null +++ b/docker/context/context.py @@ -0,0 +1,243 @@ +import os +import json +from shutil import copyfile, rmtree +from 
docker.tls import TLSConfig +from docker.errors import ContextException +from docker.context.config import get_meta_dir +from docker.context.config import get_meta_file +from docker.context.config import get_tls_dir +from docker.context.config import get_context_host + + +class Context: + """A context.""" + + def __init__(self, name, orchestrator=None, host=None, endpoints=None, + tls=False): + if not name: + raise Exception("Name not provided") + self.name = name + self.context_type = None + self.orchestrator = orchestrator + self.endpoints = {} + self.tls_cfg = {} + self.meta_path = "IN MEMORY" + self.tls_path = "IN MEMORY" + + if not endpoints: + # set default docker endpoint if no endpoint is set + default_endpoint = "docker" if ( + not orchestrator or orchestrator == "swarm" + ) else orchestrator + + self.endpoints = { + default_endpoint: { + "Host": get_context_host(host, tls), + "SkipTLSVerify": not tls + } + } + return + + # check docker endpoints + for k, v in endpoints.items(): + if not isinstance(v, dict): + # unknown format + raise ContextException("""Unknown endpoint format for + context {}: {}""".format(name, v)) + + self.endpoints[k] = v + if k != "docker": + continue + + self.endpoints[k]["Host"] = v.get("Host", get_context_host( + host, tls)) + self.endpoints[k]["SkipTLSVerify"] = bool(v.get( + "SkipTLSVerify", not tls)) + + def set_endpoint( + self, name="docker", host=None, tls_cfg=None, + skip_tls_verify=False, def_namespace=None): + self.endpoints[name] = { + "Host": get_context_host(host, not skip_tls_verify), + "SkipTLSVerify": skip_tls_verify + } + if def_namespace: + self.endpoints[name]["DefaultNamespace"] = def_namespace + + if tls_cfg: + self.tls_cfg[name] = tls_cfg + + def inspect(self): + return self.__call__() + + @classmethod + def load_context(cls, name): + meta = Context._load_meta(name) + if meta: + instance = cls( + meta["Name"], + orchestrator=meta["Metadata"].get("StackOrchestrator", None), + endpoints=meta.get("Endpoints", None)) + instance.context_type = meta["Metadata"].get("Type", None) + instance._load_certs() + instance.meta_path = get_meta_dir(name) + return instance + return None + + @classmethod + def _load_meta(cls, name): + meta_file = get_meta_file(name) + if not os.path.isfile(meta_file): + return None + + metadata = {} + try: + with open(meta_file) as f: + metadata = json.load(f) + except (OSError, KeyError, ValueError) as e: + # unknown format + raise Exception("""Detected corrupted meta file for + context {} : {}""".format(name, e)) + + # for docker endpoints, set defaults for + # Host and SkipTLSVerify fields + for k, v in metadata["Endpoints"].items(): + if k != "docker": + continue + metadata["Endpoints"][k]["Host"] = v.get( + "Host", get_context_host(None, False)) + metadata["Endpoints"][k]["SkipTLSVerify"] = bool( + v.get("SkipTLSVerify", True)) + + return metadata + + def _load_certs(self): + certs = {} + tls_dir = get_tls_dir(self.name) + for endpoint in self.endpoints.keys(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + continue + ca_cert = None + cert = None + key = None + for filename in os.listdir(os.path.join(tls_dir, endpoint)): + if filename.startswith("ca"): + ca_cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("cert"): + cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("key"): + key = os.path.join(tls_dir, endpoint, filename) + if all([ca_cert, cert, key]): + verify = None + if endpoint == "docker" and not self.endpoints["docker"].get( + "SkipTLSVerify", 
False): + verify = True + certs[endpoint] = TLSConfig( + client_cert=(cert, key), ca_cert=ca_cert, verify=verify) + self.tls_cfg = certs + self.tls_path = tls_dir + + def save(self): + meta_dir = get_meta_dir(self.name) + if not os.path.isdir(meta_dir): + os.makedirs(meta_dir) + with open(get_meta_file(self.name), "w") as f: + f.write(json.dumps(self.Metadata)) + + tls_dir = get_tls_dir(self.name) + for endpoint, tls in self.tls_cfg.items(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + os.makedirs(os.path.join(tls_dir, endpoint)) + + ca_file = tls.ca_cert + if ca_file: + copyfile(ca_file, os.path.join( + tls_dir, endpoint, os.path.basename(ca_file))) + + if tls.cert: + cert_file, key_file = tls.cert + copyfile(cert_file, os.path.join( + tls_dir, endpoint, os.path.basename(cert_file))) + copyfile(key_file, os.path.join( + tls_dir, endpoint, os.path.basename(key_file))) + + self.meta_path = get_meta_dir(self.name) + self.tls_path = get_tls_dir(self.name) + + def remove(self): + if os.path.isdir(self.meta_path): + rmtree(self.meta_path) + if os.path.isdir(self.tls_path): + rmtree(self.tls_path) + + def __repr__(self): + return f"<{self.__class__.__name__}: '{self.name}'>" + + def __str__(self): + return json.dumps(self.__call__(), indent=2) + + def __call__(self): + result = self.Metadata + result.update(self.TLSMaterial) + result.update(self.Storage) + return result + + def is_docker_host(self): + return self.context_type is None + + @property + def Name(self): + return self.name + + @property + def Host(self): + if not self.orchestrator or self.orchestrator == "swarm": + endpoint = self.endpoints.get("docker", None) + if endpoint: + return endpoint.get("Host", None) + return None + + return self.endpoints[self.orchestrator].get("Host", None) + + @property + def Orchestrator(self): + return self.orchestrator + + @property + def Metadata(self): + meta = {} + if self.orchestrator: + meta = {"StackOrchestrator": self.orchestrator} + return { + "Name": self.name, + "Metadata": meta, + "Endpoints": self.endpoints + } + + @property + def TLSConfig(self): + key = self.orchestrator + if not key or key == "swarm": + key = "docker" + if key in self.tls_cfg.keys(): + return self.tls_cfg[key] + return None + + @property + def TLSMaterial(self): + certs = {} + for endpoint, tls in self.tls_cfg.items(): + cert, key = tls.cert + certs[endpoint] = list( + map(os.path.basename, [tls.ca_cert, cert, key])) + return { + "TLSMaterial": certs + } + + @property + def Storage(self): + return { + "Storage": { + "MetadataPath": self.meta_path, + "TLSPath": self.tls_path + }} diff --git a/docker/credentials/store.py b/docker/credentials/store.py index 0017888..297f468 100644 --- a/docker/credentials/store.py +++ b/docker/credentials/store.py @@ -1,23 +1,21 @@ import errno import json +import shutil import subprocess -import six - from . import constants from . import errors from .utils import create_environment_dict -from .utils import find_executable -class Store(object): +class Store: def __init__(self, program, environment=None): """ Create a store object that acts as an interface to perform the basic operations for storing, retrieving and erasing credentials using `program`. """ self.program = constants.PROGRAM_PREFIX + program - self.exe = find_executable(self.program) + self.exe = shutil.which(self.program) self.environment = environment if self.exe is None: raise errors.InitializationError( @@ -30,7 +28,7 @@ class Store(object): """ Retrieve credentials for `server`. 
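A brief usage sketch of the Context class defined in the context.py hunk above may help when reading the later context-related changes; the import path and the host URL are assumptions, and nothing is written to disk until save() is called.

from docker.context import Context

# Minimal sketch based on the constructor shown above; the host URL is a
# placeholder. With no endpoints given, a default "docker" endpoint is built.
ctx = Context("staging", host="tcp://10.0.0.5:2376", tls=False)

print(ctx.Name)   # 'staging'
print(ctx.Host)   # host of the default 'docker' endpoint
print(ctx)        # JSON dump of Metadata, TLSMaterial and Storage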
If no credentials are found, a `StoreError` will be raised. """ - if not isinstance(server, six.binary_type): + if not isinstance(server, bytes): server = server.encode('utf-8') data = self._execute('get', server) result = json.loads(data.decode('utf-8')) @@ -41,7 +39,7 @@ class Store(object): # raise CredentialsNotFound if result['Username'] == '' and result['Secret'] == '': raise errors.CredentialsNotFound( - 'No matching credentials in {}'.format(self.program) + f'No matching credentials in {self.program}' ) return result @@ -61,7 +59,7 @@ class Store(object): """ Erase credentials for `server`. Raises a `StoreError` if an error occurs. """ - if not isinstance(server, six.binary_type): + if not isinstance(server, bytes): server = server.encode('utf-8') self._execute('erase', server) @@ -75,20 +73,9 @@ class Store(object): output = None env = create_environment_dict(self.environment) try: - if six.PY3: - output = subprocess.check_output( - [self.exe, subcmd], input=data_input, env=env, - ) - else: - process = subprocess.Popen( - [self.exe, subcmd], stdin=subprocess.PIPE, - stdout=subprocess.PIPE, env=env, - ) - output, _ = process.communicate(data_input) - if process.returncode != 0: - raise subprocess.CalledProcessError( - returncode=process.returncode, cmd='', output=output - ) + output = subprocess.check_output( + [self.exe, subcmd], input=data_input, env=env, + ) except subprocess.CalledProcessError as e: raise errors.process_store_error(e, self.program) except OSError as e: diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py index 3f720ef..5c83d05 100644 --- a/docker/credentials/utils.py +++ b/docker/credentials/utils.py @@ -1,32 +1,4 @@ -import distutils.spawn import os -import sys - - -def find_executable(executable, path=None): - """ - As distutils.spawn.find_executable, but on Windows, look up - every extension declared in PATHEXT instead of just `.exe` - """ - if sys.platform != 'win32': - return distutils.spawn.find_executable(executable, path) - - if path is None: - path = os.environ['PATH'] - - paths = path.split(os.pathsep) - extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep) - base, ext = os.path.splitext(executable) - - if not os.path.isfile(executable): - for p in paths: - for ext in extensions: - f = os.path.join(p, base + ext) - if os.path.isfile(f): - return f - return None - else: - return executable def create_environment_dict(overrides): diff --git a/docker/errors.py b/docker/errors.py index c340dcb..8cf8670 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -1,5 +1,14 @@ import requests +_image_not_found_explanation_fragments = frozenset( + fragment.lower() for fragment in [ + 'no such image', + 'not found: does not exist or no pull access', + 'repository does not exist', + 'was found but does not match the specified platform', + ] +) + class DockerException(Exception): """ @@ -21,14 +30,13 @@ def create_api_error_from_http_exception(e): explanation = (response.content or '').strip() cls = APIError if response.status_code == 404: - if explanation and ('No such image' in str(explanation) or - 'not found: does not exist or no pull access' - in str(explanation) or - 'repository does not exist' in str(explanation)): + explanation_msg = (explanation or '').lower() + if any(fragment in explanation_msg + for fragment in _image_not_found_explanation_fragments): cls = ImageNotFound else: cls = NotFound - raise cls(e, response=response, explanation=explanation) + raise cls(e, response=response, explanation=explanation) from e class 
APIError(requests.exceptions.HTTPError, DockerException): @@ -38,23 +46,25 @@ class APIError(requests.exceptions.HTTPError, DockerException): def __init__(self, message, response=None, explanation=None): # requests 1.2 supports response as a keyword argument, but # requests 1.1 doesn't - super(APIError, self).__init__(message) + super().__init__(message) self.response = response self.explanation = explanation def __str__(self): - message = super(APIError, self).__str__() + message = super().__str__() if self.is_client_error(): - message = '{0} Client Error: {1}'.format( - self.response.status_code, self.response.reason) + message = '{} Client Error for {}: {}'.format( + self.response.status_code, self.response.url, + self.response.reason) elif self.is_server_error(): - message = '{0} Server Error: {1}'.format( - self.response.status_code, self.response.reason) + message = '{} Server Error for {}: {}'.format( + self.response.status_code, self.response.url, + self.response.reason) if self.explanation: - message = '{0} ("{1}")'.format(message, self.explanation) + message = f'{message} ("{self.explanation}")' return message @@ -131,11 +141,11 @@ class ContainerError(DockerException): self.image = image self.stderr = stderr - err = ": {}".format(stderr) if stderr is not None else "" + err = f": {stderr}" if stderr is not None else "" msg = ("Command '{}' in image '{}' returned non-zero exit " "status {}{}").format(command, image, exit_status, err) - super(ContainerError, self).__init__(msg) + super().__init__(msg) class StreamParseError(RuntimeError): @@ -145,7 +155,7 @@ class StreamParseError(RuntimeError): class BuildError(DockerException): def __init__(self, reason, build_log): - super(BuildError, self).__init__(reason) + super().__init__(reason) self.msg = reason self.build_log = build_log @@ -155,11 +165,43 @@ class ImageLoadError(DockerException): def create_unexpected_kwargs_error(name, kwargs): - quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)] - text = ["{}() ".format(name)] + quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)] + text = [f"{name}() "] if len(quoted_kwargs) == 1: text.append("got an unexpected keyword argument ") else: text.append("got unexpected keyword arguments ") text.append(', '.join(quoted_kwargs)) return TypeError(''.join(text)) + + +class MissingContextParameter(DockerException): + def __init__(self, param): + self.param = param + + def __str__(self): + return (f"missing parameter: {self.param}") + + +class ContextAlreadyExists(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return (f"context {self.name} already exists") + + +class ContextException(DockerException): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return (self.msg) + + +class ContextNotFound(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return (f"context '{self.name}' not found") diff --git a/docker/models/configs.py b/docker/models/configs.py index 7f23f65..3588c8b 100644 --- a/docker/models/configs.py +++ b/docker/models/configs.py @@ -7,7 +7,7 @@ class Config(Model): id_attribute = 'ID' def __repr__(self): - return "<%s: '%s'>" % (self.__class__.__name__, self.name) + return f"<{self.__class__.__name__}: '{self.name}'>" @property def name(self): diff --git a/docker/models/containers.py b/docker/models/containers.py index d1f275f..6661b21 100644 --- a/docker/models/containers.py +++ b/docker/models/containers.py @@ -225,7 +225,8 @@ class Container(Model): """ return 
self.client.api.export(self.id, chunk_size) - def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): """ Retrieve a file or folder from the container in the form of a tar archive. @@ -235,6 +236,8 @@ class Container(Model): chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False Returns: (tuple): First element is a raw tar data stream. Second element is @@ -255,7 +258,8 @@ class Container(Model): ... f.write(chunk) >>> f.close() """ - return self.client.api.get_archive(self.id, path, chunk_size) + return self.client.api.get_archive(self.id, path, + chunk_size, encode_stream) def kill(self, signal=None): """ @@ -549,6 +553,11 @@ class ContainerCollection(Collection): ``["SYS_ADMIN", "MKNOD"]``. cap_drop (list of str): Drop kernel capabilities. cgroup_parent (str): Override the default parent cgroup. + cgroupns (str): Override the default cgroup namespace mode for the + container. One of: + - ``private`` the container runs in its own private cgroup + namespace. + - ``host`` use the host system's cgroup namespace. cpu_count (int): Number of usable CPUs (Windows only). cpu_percent (int): Usable percentage of the available CPUs (Windows only). @@ -579,6 +588,9 @@ class ContainerCollection(Collection): For example, ``/dev/sda:/dev/xvda:rwm`` allows the container to have read-write access to the host's ``/dev/sda`` via a node named ``/dev/xvda`` inside the container. + device_requests (:py:class:`list`): Expose host resources such as + GPUs to the container, as a list of + :py:class:`docker.types.DeviceRequest` instances. dns (:py:class:`list`): Set custom DNS servers. dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file. @@ -593,7 +605,28 @@ class ContainerCollection(Collection): group_add (:py:class:`list`): List of additional group names and/or IDs that the container process will run as. healthcheck (dict): Specify a test to perform to check that the - container is healthy. + container is healthy. The dict takes the following keys: + + - test (:py:class:`list` or str): Test to perform to determine + container health. Possible values: + + - Empty list: Inherit healthcheck from parent image + - ``["NONE"]``: Disable healthcheck + - ``["CMD", args...]``: exec arguments directly. + - ``["CMD-SHELL", command]``: Run command in the system's + default shell. + + If a string is provided, it will be used as a ``CMD-SHELL`` + command. + - interval (int): The time to wait between checks in + nanoseconds. It should be 0 or at least 1000000 (1 ms). + - timeout (int): The time to wait before considering the check + to have hung. It should be 0 or at least 1000000 (1 ms). + - retries (int): The number of consecutive failures needed to + consider a container as unhealthy. + - start_period (int): Start period for the container to + initialize before starting health-retries countdown in + nanoseconds. It should be 0 or at least 1000000 (1 ms). hostname (str): Optional hostname for the container. init (bool): Run an init inside the container that forwards signals and reaps processes @@ -637,11 +670,12 @@ class ContainerCollection(Collection): network_mode (str): One of: - ``bridge`` Create a new network stack for the container on - on the bridge network. 
+ the bridge network. - ``none`` No networking for this container. - ``container:<name|id>`` Reuse another container's network stack. - ``host`` Use the host network stack. + This mode is incompatible with ``ports``. Incompatible with ``network``. oom_kill_disable (bool): Whether to disable OOM killer. @@ -675,6 +709,7 @@ class ContainerCollection(Collection): to a single container port. For example, ``{'1111/tcp': [1234, 4567]}``. + Incompatible with ``host`` network mode. privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read @@ -752,6 +787,15 @@ class ContainerCollection(Collection): {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'}, '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}} + Or a list of strings which each one of its elements specifies a + mount volume. + + For example: + + .. code-block:: python + + ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1'] + volumes_from (:py:class:`list`): List of container names or IDs to get volumes from. working_dir (str): Path to the working directory. @@ -783,7 +827,7 @@ class ContainerCollection(Collection): image = image.id stream = kwargs.pop('stream', False) detach = kwargs.pop('detach', False) - platform = kwargs.pop('platform', None) + platform = kwargs.get('platform', None) if detach and remove: if version_gte(self.client.api._version, '1.25'): @@ -967,6 +1011,7 @@ RUN_CREATE_KWARGS = [ 'mac_address', 'name', 'network_disabled', + 'platform', 'stdin_open', 'stop_signal', 'tty', @@ -983,6 +1028,7 @@ RUN_HOST_CONFIG_KWARGS = [ 'cap_add', 'cap_drop', 'cgroup_parent', + 'cgroupns', 'cpu_count', 'cpu_percent', 'cpu_period', @@ -998,6 +1044,7 @@ RUN_HOST_CONFIG_KWARGS = [ 'device_write_bps', 'device_write_iops', 'devices', + 'device_requests', 'dns_opt', 'dns_search', 'dns', diff --git a/docker/models/images.py b/docker/models/images.py index 757a5a4..e3ec39d 100644 --- a/docker/models/images.py +++ b/docker/models/images.py @@ -2,8 +2,6 @@ import itertools import re import warnings -import six - from ..api import APIClient from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..errors import BuildError, ImageLoadError, InvalidArgument @@ -17,7 +15,10 @@ class Image(Model): An image on the server. """ def __repr__(self): - return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags)) + return "<{}: '{}'>".format( + self.__class__.__name__, + "', '".join(self.tags), + ) @property def labels(self): @@ -30,12 +31,12 @@ class Image(Model): @property def short_id(self): """ - The ID of the image truncated to 10 characters, plus the ``sha256:`` + The ID of the image truncated to 12 characters, plus the ``sha256:`` prefix. """ if self.id.startswith('sha256:'): - return self.id[:17] - return self.id[:10] + return self.id[:19] + return self.id[:12] @property def tags(self): @@ -60,6 +61,24 @@ class Image(Model): """ return self.client.api.history(self.id) + def remove(self, force=False, noprune=False): + """ + Remove this image. + + Args: + force (bool): Force removal of the image + noprune (bool): Do not delete untagged parents + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.remove_image( + self.id, + force=force, + noprune=noprune, + ) + def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False): """ Get a tarball of an image. Similar to the ``docker save`` command. 
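The containers.py hunks above document several new run() options (cgroupns, device_requests and the expanded healthcheck dict). A minimal sketch of how they combine follows; the image name, the GPU request and the check command are placeholders, not part of this diff, and device_requests needs a daemon speaking API version 1.40 or later.

import docker
from docker.types import DeviceRequest

client = docker.from_env()

# Hypothetical container exercising the options documented above; the image
# and the check command are placeholders.
container = client.containers.run(
    "nvidia/cuda:11.0-base",
    "nvidia-smi",
    detach=True,
    cgroupns="private",
    device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
    healthcheck={
        "test": ["CMD-SHELL", "nvidia-smi || exit 1"],
        "interval": 30 * 1_000_000_000,  # nanoseconds, per the docstring above
        "retries": 3,
    },
)
print(container.id)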
@@ -84,19 +103,19 @@ class Image(Model): Example: - >>> image = cli.get_image("busybox:latest") + >>> image = cli.images.get("busybox:latest") >>> f = open('/tmp/busybox-latest.tar', 'wb') - >>> for chunk in image: + >>> for chunk in image.save(): >>> f.write(chunk) >>> f.close() """ img = self.id if named: img = self.tags[0] if self.tags else img - if isinstance(named, six.string_types): + if isinstance(named, str): if named not in self.tags: raise InvalidArgument( - "{} is not a valid tag for this image".format(named) + f"{named} is not a valid tag for this image" ) img = named @@ -127,7 +146,7 @@ class RegistryData(Model): Image metadata stored on the registry, including available platforms. """ def __init__(self, image_name, *args, **kwargs): - super(RegistryData, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.image_name = image_name @property @@ -140,10 +159,10 @@ class RegistryData(Model): @property def short_id(self): """ - The ID of the image truncated to 10 characters, plus the ``sha256:`` + The ID of the image truncated to 12 characters, plus the ``sha256:`` prefix. """ - return self.id[:17] + return self.id[:19] def pull(self, platform=None): """ @@ -180,7 +199,7 @@ class RegistryData(Model): parts = platform.split('/') if len(parts) > 3 or len(parts) < 1: raise InvalidArgument( - '"{0}" is not a valid platform descriptor'.format(platform) + f'"{platform}" is not a valid platform descriptor' ) platform = {'os': parts[0]} if len(parts) > 2: @@ -205,10 +224,10 @@ class ImageCollection(Collection): Build an image and return it. Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` must be set. - If you have a tar file for the Docker build context (including a - Dockerfile) already, pass a readable file-like object to ``fileobj`` - and also pass ``custom_context=True``. If the stream is compressed - also, set ``encoding`` to the correct value (e.g ``gzip``). + If you already have a tar file for the Docker build context (including + a Dockerfile), pass a readable file-like object to ``fileobj`` + and also pass ``custom_context=True``. If the stream is also + compressed, set ``encoding`` to the correct value (e.g ``gzip``). If you want to get the raw output of the build, use the :py:meth:`~docker.api.build.BuildApiMixin.build` method in the @@ -265,7 +284,7 @@ class ImageCollection(Collection): Returns: (tuple): The first item is the :py:class:`Image` object for the - image that was build. The second item is a generator of the + image that was built. The second item is a generator of the build logs as JSON-decoded objects. Raises: @@ -277,7 +296,7 @@ class ImageCollection(Collection): If neither ``path`` nor ``fileobj`` is specified. """ resp = self.client.api.build(**kwargs) - if isinstance(resp, six.string_types): + if isinstance(resp, str): return self.get(resp) last_event = None image_id = None @@ -395,12 +414,13 @@ class ImageCollection(Collection): return [self.get(i) for i in images] - def pull(self, repository, tag=None, **kwargs): + def pull(self, repository, tag=None, all_tags=False, **kwargs): """ Pull an image of the given name and return it. Similar to the ``docker pull`` command. - If no tag is specified, all tags from that repository will be - pulled. + If ``tag`` is ``None`` or empty, it is set to ``latest``. + If ``all_tags`` is set, the ``tag`` parameter is ignored and all image + tags will be pulled. 
If you want to get the raw pull output, use the :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the @@ -413,10 +433,11 @@ class ImageCollection(Collection): config for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags Returns: (:py:class:`Image` or list): The image that has been pulled. - If no ``tag`` was specified, the method will return a list + If ``all_tags`` is True, the method will return a list of :py:class:`Image` objects belonging to this repository. Raises: @@ -426,13 +447,13 @@ class ImageCollection(Collection): Example: >>> # Pull the image tagged `latest` in the busybox repo - >>> image = client.images.pull('busybox:latest') + >>> image = client.images.pull('busybox') >>> # Pull all tags in the busybox repo - >>> images = client.images.pull('busybox') + >>> images = client.images.pull('busybox', all_tags=True) """ - if not tag: - repository, tag = parse_repository_tag(repository) + repository, image_tag = parse_repository_tag(repository) + tag = tag or image_tag or 'latest' if 'stream' in kwargs: warnings.warn( @@ -442,14 +463,14 @@ class ImageCollection(Collection): del kwargs['stream'] pull_log = self.client.api.pull( - repository, tag=tag, stream=True, **kwargs + repository, tag=tag, stream=True, all_tags=all_tags, **kwargs ) for _ in pull_log: # We don't do anything with the logs, but we need # to keep the connection alive and wait for the image # to be pulled. pass - if tag: + if not all_tags: return self.get('{0}{2}{1}'.format( repository, tag, '@' if tag.startswith('sha256:') else ':' )) diff --git a/docker/models/networks.py b/docker/models/networks.py index f944c8e..093deb7 100644 --- a/docker/models/networks.py +++ b/docker/models/networks.py @@ -46,6 +46,8 @@ class Network(Model): network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. + driver_opt (dict): A dictionary of options to provide to the + network driver. Defaults to ``None``. Raises: :py:class:`docker.errors.APIError` diff --git a/docker/models/plugins.py b/docker/models/plugins.py index 0688018..16f5245 100644 --- a/docker/models/plugins.py +++ b/docker/models/plugins.py @@ -7,7 +7,7 @@ class Plugin(Model): A plugin on the server. """ def __repr__(self): - return "<%s: '%s'>" % (self.__class__.__name__, self.name) + return f"<{self.__class__.__name__}: '{self.name}'>" @property def name(self): @@ -44,16 +44,19 @@ class Plugin(Model): self.client.api.configure_plugin(self.name, options) self.reload() - def disable(self): + def disable(self, force=False): """ Disable the plugin. + Args: + force (bool): Force disable. Default: False + Raises: :py:class:`docker.errors.APIError` If the server returns an error. 
""" - self.client.api.disable_plugin(self.name) + self.client.api.disable_plugin(self.name, force) self.reload() def enable(self, timeout=0): @@ -117,9 +120,12 @@ class Plugin(Model): if remote is None: remote = self.name privileges = self.client.api.plugin_privileges(remote) - for d in self.client.api.upgrade_plugin(self.name, remote, privileges): - yield d - self._reload() + yield from self.client.api.upgrade_plugin( + self.name, + remote, + privileges, + ) + self.reload() class PluginCollection(Collection): diff --git a/docker/models/resource.py b/docker/models/resource.py index ed3900a..89030e5 100644 --- a/docker/models/resource.py +++ b/docker/models/resource.py @@ -1,5 +1,4 @@ - -class Model(object): +class Model: """ A base class for representing a single object on the server. """ @@ -18,13 +17,13 @@ class Model(object): self.attrs = {} def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, self.short_id) + return f"<{self.__class__.__name__}: {self.short_id}>" def __eq__(self, other): return isinstance(other, self.__class__) and self.id == other.id def __hash__(self): - return hash("%s:%s" % (self.__class__.__name__, self.id)) + return hash(f"{self.__class__.__name__}:{self.id}") @property def id(self): @@ -36,9 +35,9 @@ class Model(object): @property def short_id(self): """ - The ID of the object, truncated to 10 characters. + The ID of the object, truncated to 12 characters. """ - return self.id[:10] + return self.id[:12] def reload(self): """ @@ -49,7 +48,7 @@ class Model(object): self.attrs = new_model.attrs -class Collection(object): +class Collection: """ A base class for representing all objects of a particular type on the server. diff --git a/docker/models/secrets.py b/docker/models/secrets.py index ca11ede..da01d44 100644 --- a/docker/models/secrets.py +++ b/docker/models/secrets.py @@ -7,7 +7,7 @@ class Secret(Model): id_attribute = 'ID' def __repr__(self): - return "<%s: '%s'>" % (self.__class__.__name__, self.name) + return f"<{self.__class__.__name__}: '{self.name}'>" @property def name(self): @@ -30,6 +30,7 @@ class SecretCollection(Collection): def create(self, **kwargs): obj = self.client.api.create_secret(**kwargs) + obj.setdefault("Spec", {})["Name"] = kwargs.get("name") return self.prepare_model(obj) create.__doc__ = APIClient.create_secret.__doc__ diff --git a/docker/models/services.py b/docker/models/services.py index a35687b..0643874 100644 --- a/docker/models/services.py +++ b/docker/models/services.py @@ -157,6 +157,8 @@ class ServiceCollection(Collection): constraints. preferences (list of tuple): :py:class:`~docker.types.Placement` preferences. + maxreplicas (int): :py:class:`~docker.types.Placement` maxreplicas + or (int) representing maximum number of replicas per node. platforms (list of tuple): A list of platform constraints expressed as ``(arch, os)`` tuples. container_labels (dict): Labels to apply to the container. @@ -211,6 +213,12 @@ class ServiceCollection(Collection): to the service. privileges (Privileges): Security options for the service's containers. + cap_add (:py:class:`list`): A list of kernel capabilities to add to + the default set for the container. + cap_drop (:py:class:`list`): A list of kernel capabilities to drop + from the default set for the container. + sysctls (:py:class:`dict`): A dict of sysctl values to add to the + container Returns: :py:class:`Service`: The created service. 
@@ -275,6 +283,8 @@ class ServiceCollection(Collection): # kwargs to copy straight over to ContainerSpec CONTAINER_SPEC_KWARGS = [ 'args', + 'cap_add', + 'cap_drop', 'command', 'configs', 'dns_config', @@ -297,6 +307,7 @@ CONTAINER_SPEC_KWARGS = [ 'tty', 'user', 'workdir', + 'sysctls', ] # kwargs to copy straight over to TaskTemplate @@ -312,6 +323,7 @@ CREATE_SERVICE_KWARGS = [ 'labels', 'mode', 'update_config', + 'rollback_config', 'endpoint_spec', ] @@ -319,6 +331,7 @@ PLACEMENT_KWARGS = [ 'constraints', 'preferences', 'platforms', + 'maxreplicas', ] diff --git a/docker/models/swarm.py b/docker/models/swarm.py index 755c17d..b0b1a2e 100644 --- a/docker/models/swarm.py +++ b/docker/models/swarm.py @@ -11,7 +11,7 @@ class Swarm(Model): id_attribute = 'ID' def __init__(self, *args, **kwargs): - super(Swarm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if self.client: try: self.reload() diff --git a/docker/tls.py b/docker/tls.py index d4671d1..f4dffb2 100644 --- a/docker/tls.py +++ b/docker/tls.py @@ -5,15 +5,16 @@ from . import errors from .transport import SSLHTTPAdapter -class TLSConfig(object): +class TLSConfig: """ TLS configuration. Args: client_cert (tuple of str): Path to client cert, path to client key. ca_cert (str): Path to CA cert file. - verify (bool or str): This can be ``False`` or a path to a CA cert - file. + verify (bool or str): This can be a bool or a path to a CA cert + file to verify against. If ``True``, verify using ca_cert; + if ``False`` or not specified, do not verify. ssl_version (int): A valid `SSL version`_. assert_hostname (bool): Verify the hostname of the server. @@ -32,37 +33,18 @@ class TLSConfig(object): # https://docs.docker.com/engine/articles/https/ # This diverges from the Docker CLI in that users can specify 'tls' # here, but also disable any public/default CA pool verification by - # leaving tls_verify=False + # leaving verify=False self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint - # TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is - # depcreated, and it's recommended to use OPT_NO_TLSvWhatever instead - # to exclude versions. But I think that might require a bigger - # architectural change, so I've opted not to pursue it at this time - # If the user provides an SSL version, we should use their preference if ssl_version: self.ssl_version = ssl_version else: - # If the user provides no ssl version, we should default to - # TLSv1_2. This option is the most secure, and will work for the - # majority of users with reasonably up-to-date software. However, - # before doing so, detect openssl version to ensure we can support - # it. - if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr( - ssl, 'PROTOCOL_TLSv1_2'): - # If the OpenSSL version is high enough to support TLSv1_2, - # then we should use it. - self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2') - else: - # Otherwise, TLS v1.0 seems to be the safest default; - # SSLv23 fails in mysterious ways: - # https://github.com/docker/docker-py/issues/963 - self.ssl_version = ssl.PROTOCOL_TLSv1 - - # "tls" and "tls_verify" must have both or neither cert/key files In + self.ssl_version = ssl.PROTOCOL_TLS_CLIENT + + # "client_cert" must have both or neither cert/key files. In # either case, Alert the user when both are expected, but any are # missing. 
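The tls.py hunk above corrects the documented parameter names (client_cert, ca_cert, verify). A minimal client built with them might look like this; the certificate paths and daemon address are placeholders.

import docker
from docker.tls import TLSConfig

# Placeholder paths; with verify=True the client checks the daemon's
# certificate against ca.pem, as described in the docstring above.
tls_config = TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),
    ca_cert="/certs/ca.pem",
    verify=True,
)
client = docker.DockerClient(base_url="tcp://127.0.0.1:2376", tls=tls_config)
print(client.version())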
@@ -71,7 +53,7 @@ class TLSConfig(object): tls_cert, tls_key = client_cert except ValueError: raise errors.TLSParameterError( - 'client_config must be a tuple of' + 'client_cert must be a tuple of' ' (client certificate, key file)' ) @@ -79,7 +61,7 @@ class TLSConfig(object): not os.path.isfile(tls_key)): raise errors.TLSParameterError( 'Path to a certificate and key files must be provided' - ' through the client_config param' + ' through the client_cert param' ) self.cert = (tls_cert, tls_key) @@ -88,7 +70,7 @@ class TLSConfig(object): self.ca_cert = ca_cert if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert): raise errors.TLSParameterError( - 'Invalid CA certificate provided for `tls_ca_cert`.' + 'Invalid CA certificate provided for `ca_cert`.' ) def configure_client(self, client): diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py index 4d819b6..dfbb193 100644 --- a/docker/transport/basehttpadapter.py +++ b/docker/transport/basehttpadapter.py @@ -3,6 +3,6 @@ import requests.adapters class BaseHTTPAdapter(requests.adapters.HTTPAdapter): def close(self): - super(BaseHTTPAdapter, self).close() + super().close() if hasattr(self, 'pools'): self.pools.clear() diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py index aa05538..87033cf 100644 --- a/docker/transport/npipeconn.py +++ b/docker/transport/npipeconn.py @@ -1,14 +1,11 @@ -import six +import queue import requests.adapters from docker.transport.basehttpadapter import BaseHTTPAdapter from .. import constants from .npipesocket import NpipeSocket -if six.PY3: - import http.client as httplib -else: - import httplib +import http.client as httplib try: import requests.packages.urllib3 as urllib3 @@ -18,9 +15,9 @@ except ImportError: RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer -class NpipeHTTPConnection(httplib.HTTPConnection, object): +class NpipeHTTPConnection(httplib.HTTPConnection): def __init__(self, npipe_path, timeout=60): - super(NpipeHTTPConnection, self).__init__( + super().__init__( 'localhost', timeout=timeout ) self.npipe_path = npipe_path @@ -35,7 +32,7 @@ class NpipeHTTPConnection(httplib.HTTPConnection, object): class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): def __init__(self, npipe_path, timeout=60, maxsize=10): - super(NpipeHTTPConnectionPool, self).__init__( + super().__init__( 'localhost', timeout=timeout, maxsize=maxsize ) self.npipe_path = npipe_path @@ -57,14 +54,14 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): except AttributeError: # self.pool is None raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") - except six.moves.queue.Empty: + except queue.Empty: if self.block: raise urllib3.exceptions.EmptyPoolError( self, "Pool reached maximum size and no more " "connections are allowed." 
) - pass # Oh well, we'll create a new connection then + # Oh well, we'll create a new connection then return conn or self._new_conn() @@ -73,16 +70,19 @@ class NpipeHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path', 'pools', - 'timeout'] + 'timeout', + 'max_pool_size'] def __init__(self, base_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): self.npipe_path = base_url.replace('npipe://', '') self.timeout = timeout + self.max_pool_size = max_pool_size self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) - super(NpipeHTTPAdapter, self).__init__() + super().__init__() def get_connection(self, url, proxies=None): with self.pools.lock: @@ -91,7 +91,8 @@ class NpipeHTTPAdapter(BaseHTTPAdapter): return pool pool = NpipeHTTPConnectionPool( - self.npipe_path, self.timeout + self.npipe_path, self.timeout, + maxsize=self.max_pool_size ) self.pools[url] = pool diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py index 176b5c8..766372a 100644 --- a/docker/transport/npipesocket.py +++ b/docker/transport/npipesocket.py @@ -2,7 +2,6 @@ import functools import time import io -import six import win32file import win32pipe @@ -24,7 +23,7 @@ def check_closed(f): return wrapped -class NpipeSocket(object): +class NpipeSocket: """ Partial implementation of the socket API over windows named pipes. This implementation is only designed to be used as a client socket, and server-specific methods (bind, listen, accept...) are not @@ -128,9 +127,6 @@ class NpipeSocket(object): @check_closed def recv_into(self, buf, nbytes=0): - if six.PY2: - return self._recv_into_py2(buf, nbytes) - readbuf = buf if not isinstance(buf, memoryview): readbuf = memoryview(buf) @@ -195,7 +191,7 @@ class NpipeFileIOBase(io.RawIOBase): self.sock = npipe_socket def close(self): - super(NpipeFileIOBase, self).close() + super().close() self.sock = None def fileno(self): diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py index 7de0e59..7421f33 100644 --- a/docker/transport/sshconn.py +++ b/docker/transport/sshconn.py @@ -1,16 +1,17 @@ import paramiko +import queue +import urllib.parse import requests.adapters -import six import logging import os +import signal +import socket +import subprocess from docker.transport.basehttpadapter import BaseHTTPAdapter from .. 
import constants -if six.PY3: - import http.client as httplib -else: - import httplib +import http.client as httplib try: import requests.packages.urllib3 as urllib3 @@ -20,33 +21,121 @@ except ImportError: RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer -class SSHConnection(httplib.HTTPConnection, object): - def __init__(self, ssh_transport, timeout=60): - super(SSHConnection, self).__init__( +class SSHSocket(socket.socket): + def __init__(self, host): + super().__init__( + socket.AF_INET, socket.SOCK_STREAM) + self.host = host + self.port = None + self.user = None + if ':' in self.host: + self.host, self.port = self.host.split(':') + if '@' in self.host: + self.user, self.host = self.host.split('@') + + self.proc = None + + def connect(self, **kwargs): + args = ['ssh'] + if self.user: + args = args + ['-l', self.user] + + if self.port: + args = args + ['-p', self.port] + + args = args + ['--', self.host, 'docker system dial-stdio'] + + preexec_func = None + if not constants.IS_WINDOWS_PLATFORM: + def f(): + signal.signal(signal.SIGINT, signal.SIG_IGN) + preexec_func = f + + env = dict(os.environ) + + # drop LD_LIBRARY_PATH and SSL_CERT_FILE + env.pop('LD_LIBRARY_PATH', None) + env.pop('SSL_CERT_FILE', None) + + self.proc = subprocess.Popen( + args, + env=env, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=preexec_func) + + def _write(self, data): + if not self.proc or self.proc.stdin.closed: + raise Exception('SSH subprocess not initiated.' + 'connect() must be called first.') + written = self.proc.stdin.write(data) + self.proc.stdin.flush() + return written + + def sendall(self, data): + self._write(data) + + def send(self, data): + return self._write(data) + + def recv(self, n): + if not self.proc: + raise Exception('SSH subprocess not initiated.' 
+ 'connect() must be called first.') + return self.proc.stdout.read(n) + + def makefile(self, mode): + if not self.proc: + self.connect() + self.proc.stdout.channel = self + + return self.proc.stdout + + def close(self): + if not self.proc or self.proc.stdin.closed: + return + self.proc.stdin.write(b'\n\n') + self.proc.stdin.flush() + self.proc.terminate() + + +class SSHConnection(httplib.HTTPConnection): + def __init__(self, ssh_transport=None, timeout=60, host=None): + super().__init__( 'localhost', timeout=timeout ) self.ssh_transport = ssh_transport self.timeout = timeout + self.ssh_host = host def connect(self): - sock = self.ssh_transport.open_session() - sock.settimeout(self.timeout) - sock.exec_command('docker system dial-stdio') + if self.ssh_transport: + sock = self.ssh_transport.open_session() + sock.settimeout(self.timeout) + sock.exec_command('docker system dial-stdio') + else: + sock = SSHSocket(self.ssh_host) + sock.settimeout(self.timeout) + sock.connect() + self.sock = sock class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): scheme = 'ssh' - def __init__(self, ssh_client, timeout=60, maxsize=10): - super(SSHConnectionPool, self).__init__( + def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None): + super().__init__( 'localhost', timeout=timeout, maxsize=maxsize ) - self.ssh_transport = ssh_client.get_transport() + self.ssh_transport = None self.timeout = timeout + if ssh_client: + self.ssh_transport = ssh_client.get_transport() + self.ssh_host = host def _new_conn(self): - return SSHConnection(self.ssh_transport, self.timeout) + return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host) # When re-using connections, urllib3 calls fileno() on our # SSH channel instance, quickly overloading our fd limit. To avoid this, @@ -59,14 +148,14 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): except AttributeError: # self.pool is None raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") - except six.moves.queue.Empty: + except queue.Empty: if self.block: raise urllib3.exceptions.EmptyPoolError( self, "Pool reached maximum size and no more " "connections are allowed." 
) - pass # Oh well, we'll create a new connection then + # Oh well, we'll create a new connection then return conn or self._new_conn() @@ -74,14 +163,33 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): class SSHHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ - 'pools', 'timeout', 'ssh_client', 'ssh_params' + 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size' ] def __init__(self, base_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE, + shell_out=False): + self.ssh_client = None + if not shell_out: + self._create_paramiko_client(base_url) + self._connect() + + self.ssh_host = base_url + if base_url.startswith('ssh://'): + self.ssh_host = base_url[len('ssh://'):] + + self.timeout = timeout + self.max_pool_size = max_pool_size + self.pools = RecentlyUsedContainer( + pool_connections, dispose_func=lambda p: p.close() + ) + super().__init__() + + def _create_paramiko_client(self, base_url): logging.getLogger("paramiko").setLevel(logging.WARNING) self.ssh_client = paramiko.SSHClient() - base_url = six.moves.urllib_parse.urlparse(base_url) + base_url = urllib.parse.urlparse(base_url) self.ssh_params = { "hostname": base_url.hostname, "port": base_url.port, @@ -93,48 +201,54 @@ class SSHHTTPAdapter(BaseHTTPAdapter): with open(ssh_config_file) as f: conf.parse(f) host_config = conf.lookup(base_url.hostname) - self.ssh_conf = host_config if 'proxycommand' in host_config: self.ssh_params["sock"] = paramiko.ProxyCommand( - self.ssh_conf['proxycommand'] + host_config['proxycommand'] ) if 'hostname' in host_config: self.ssh_params['hostname'] = host_config['hostname'] if base_url.port is None and 'port' in host_config: - self.ssh_params['port'] = self.ssh_conf['port'] + self.ssh_params['port'] = host_config['port'] if base_url.username is None and 'user' in host_config: - self.ssh_params['username'] = self.ssh_conf['user'] + self.ssh_params['username'] = host_config['user'] + if 'identityfile' in host_config: + self.ssh_params['key_filename'] = host_config['identityfile'] self.ssh_client.load_system_host_keys() - self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy()) - - self._connect() - self.timeout = timeout - self.pools = RecentlyUsedContainer( - pool_connections, dispose_func=lambda p: p.close() - ) - super(SSHHTTPAdapter, self).__init__() + self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy()) def _connect(self): - self.ssh_client.connect(**self.ssh_params) + if self.ssh_client: + self.ssh_client.connect(**self.ssh_params) def get_connection(self, url, proxies=None): + if not self.ssh_client: + return SSHConnectionPool( + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host + ) with self.pools.lock: pool = self.pools.get(url) if pool: return pool # Connection is closed try a reconnect - if not self.ssh_client.get_transport(): + if self.ssh_client and not self.ssh_client.get_transport(): self._connect() pool = SSHConnectionPool( - self.ssh_client, self.timeout + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host ) self.pools[url] = pool return pool def close(self): - super(SSHHTTPAdapter, self).close() - self.ssh_client.close() + super().close() + if self.ssh_client: + self.ssh_client.close() diff --git a/docker/transport/ssladapter.py b/docker/transport/ssladapter.py index 
12de76c..6aa8003 100644 --- a/docker/transport/ssladapter.py +++ b/docker/transport/ssladapter.py @@ -2,9 +2,7 @@ https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/ https://github.com/kennethreitz/requests/pull/799 """ -import sys - -from distutils.version import StrictVersion +from packaging.version import Version from requests.adapters import HTTPAdapter from docker.transport.basehttpadapter import BaseHTTPAdapter @@ -17,12 +15,6 @@ except ImportError: PoolManager = urllib3.poolmanager.PoolManager -# Monkey-patching match_hostname with a version that supports -# IP-address checking. Not necessary for Python 3.5 and above -if sys.version_info[0] < 3 or sys.version_info[1] < 5: - from backports.ssl_match_hostname import match_hostname - urllib3.connection.match_hostname = match_hostname - class SSLHTTPAdapter(BaseHTTPAdapter): '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' @@ -36,7 +28,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter): self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint - super(SSLHTTPAdapter, self).__init__(**kwargs) + super().__init__(**kwargs) def init_poolmanager(self, connections, maxsize, block=False): kwargs = { @@ -59,7 +51,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter): But we still need to take care of when there is a proxy poolmanager """ - conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs) + conn = super().get_connection(*args, **kwargs) if conn.assert_hostname != self.assert_hostname: conn.assert_hostname = self.assert_hostname return conn @@ -70,4 +62,4 @@ class SSLHTTPAdapter(BaseHTTPAdapter): return False if urllib_ver == 'dev': return True - return StrictVersion(urllib_ver) > StrictVersion('1.5') + return Version(urllib_ver) > Version('1.5') diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py index b619103..1b00762 100644 --- a/docker/transport/unixconn.py +++ b/docker/transport/unixconn.py @@ -1,7 +1,6 @@ -import six import requests.adapters import socket -from six.moves import http_client as httplib +import http.client as httplib from docker.transport.basehttpadapter import BaseHTTPAdapter from .. import constants @@ -15,27 +14,15 @@ except ImportError: RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer -class UnixHTTPResponse(httplib.HTTPResponse, object): - def __init__(self, sock, *args, **kwargs): - disable_buffering = kwargs.pop('disable_buffering', False) - if six.PY2: - # FIXME: We may need to disable buffering on Py3 as well, - # but there's no clear way to do it at the moment. 
See: - # https://github.com/docker/docker-py/issues/1799 - kwargs['buffering'] = not disable_buffering - super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs) - - -class UnixHTTPConnection(httplib.HTTPConnection, object): +class UnixHTTPConnection(httplib.HTTPConnection): def __init__(self, base_url, unix_socket, timeout=60): - super(UnixHTTPConnection, self).__init__( + super().__init__( 'localhost', timeout=timeout ) self.base_url = base_url self.unix_socket = unix_socket self.timeout = timeout - self.disable_buffering = False def connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -44,20 +31,15 @@ class UnixHTTPConnection(httplib.HTTPConnection, object): self.sock = sock def putheader(self, header, *values): - super(UnixHTTPConnection, self).putheader(header, *values) - if header == 'Connection' and 'Upgrade' in values: - self.disable_buffering = True + super().putheader(header, *values) def response_class(self, sock, *args, **kwargs): - if self.disable_buffering: - kwargs['disable_buffering'] = True - - return UnixHTTPResponse(sock, *args, **kwargs) + return httplib.HTTPResponse(sock, *args, **kwargs) class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): def __init__(self, base_url, socket_path, timeout=60, maxsize=10): - super(UnixHTTPConnectionPool, self).__init__( + super().__init__( 'localhost', timeout=timeout, maxsize=maxsize ) self.base_url = base_url @@ -74,19 +56,22 @@ class UnixHTTPAdapter(BaseHTTPAdapter): __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools', 'socket_path', - 'timeout'] + 'timeout', + 'max_pool_size'] def __init__(self, socket_url, timeout=60, - pool_connections=constants.DEFAULT_NUM_POOLS): + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): socket_path = socket_url.replace('http+unix://', '') if not socket_path.startswith('/'): socket_path = '/' + socket_path self.socket_path = socket_path self.timeout = timeout + self.max_pool_size = max_pool_size self.pools = RecentlyUsedContainer( pool_connections, dispose_func=lambda p: p.close() ) - super(UnixHTTPAdapter, self).__init__() + super().__init__() def get_connection(self, url, proxies=None): with self.pools.lock: @@ -95,7 +80,8 @@ class UnixHTTPAdapter(BaseHTTPAdapter): return pool pool = UnixHTTPConnectionPool( - url, self.socket_path, self.timeout + url, self.socket_path, self.timeout, + maxsize=self.max_pool_size ) self.pools[url] = pool diff --git a/docker/types/__init__.py b/docker/types/__init__.py index 5db330e..b425746 100644 --- a/docker/types/__init__.py +++ b/docker/types/__init__.py @@ -1,5 +1,7 @@ # flake8: noqa -from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit +from .containers import ( + ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest +) from .daemon import CancellableStream from .healthcheck import Healthcheck from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig diff --git a/docker/types/base.py b/docker/types/base.py index 6891062..8851f1e 100644 --- a/docker/types/base.py +++ b/docker/types/base.py @@ -1,7 +1,4 @@ -import six - - class DictType(dict): def __init__(self, init): - for k, v in six.iteritems(init): + for k, v in init.items(): self[k] = v diff --git a/docker/types/containers.py b/docker/types/containers.py index fd8cab4..84df0f7 100644 --- a/docker/types/containers.py +++ b/docker/types/containers.py @@ -1,5 +1,3 @@ -import six - from .. 
import errors from ..utils.utils import ( convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds, @@ -10,7 +8,7 @@ from .base import DictType from .healthcheck import Healthcheck -class LogConfigTypesEnum(object): +class LogConfigTypesEnum: _values = ( 'json-file', 'syslog', @@ -61,7 +59,7 @@ class LogConfig(DictType): if config and not isinstance(config, dict): raise ValueError("LogConfig.config must be a dictionary") - super(LogConfig, self).__init__({ + super().__init__({ 'Type': log_driver_type, 'Config': config }) @@ -97,8 +95,8 @@ class Ulimit(DictType): Args: - name (str): Which ulimit will this apply to. A list of valid names can - be found `here <http://tinyurl.me/ZWRkM2Ztwlykf>`_. + name (str): Which ulimit will this apply to. The valid names can be + found in '/etc/security/limits.conf' on a gnu/linux system. soft (int): The soft limit for this ulimit. Optional. hard (int): The hard limit for this ulimit. Optional. @@ -117,13 +115,13 @@ class Ulimit(DictType): name = kwargs.get('name', kwargs.get('Name')) soft = kwargs.get('soft', kwargs.get('Soft')) hard = kwargs.get('hard', kwargs.get('Hard')) - if not isinstance(name, six.string_types): + if not isinstance(name, str): raise ValueError("Ulimit.name must be a string") if soft and not isinstance(soft, int): raise ValueError("Ulimit.soft must be an integer") if hard and not isinstance(hard, int): raise ValueError("Ulimit.hard must be an integer") - super(Ulimit, self).__init__({ + super().__init__({ 'Name': name, 'Soft': soft, 'Hard': hard @@ -154,6 +152,104 @@ class Ulimit(DictType): self['Hard'] = value +class DeviceRequest(DictType): + """ + Create a device request to be used with + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + + Args: + + driver (str): Which driver to use for this device. Optional. + count (int): Number or devices to request. Optional. + Set to -1 to request all available devices. + device_ids (list): List of strings for device IDs. Optional. + Set either ``count`` or ``device_ids``. + capabilities (list): List of lists of strings to request + capabilities. Optional. The global list acts like an OR, + and the sub-lists are AND. The driver will try to satisfy + one of the sub-lists. + Available capabilities for the ``nvidia`` driver can be found + `here <https://github.com/NVIDIA/nvidia-container-runtime>`_. + options (dict): Driver-specific options. Optional. 
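To complement the argument list above, a DeviceRequest that selects specific devices instead of a count might look like this; the driver name and device IDs are placeholders.

from docker.types import DeviceRequest

# Placeholder driver and IDs; per the docstring above, set either `count`
# or `device_ids`, not both.
gpu_request = DeviceRequest(
    driver="nvidia",
    device_ids=["GPU-0", "GPU-1"],
    capabilities=[["gpu", "utility"]],
)
print(gpu_request.device_ids)   # ['GPU-0', 'GPU-1']
print(gpu_request.count)        # 0, since device_ids is given instead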
+ """ + + def __init__(self, **kwargs): + driver = kwargs.get('driver', kwargs.get('Driver')) + count = kwargs.get('count', kwargs.get('Count')) + device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs')) + capabilities = kwargs.get('capabilities', kwargs.get('Capabilities')) + options = kwargs.get('options', kwargs.get('Options')) + + if driver is None: + driver = '' + elif not isinstance(driver, str): + raise ValueError('DeviceRequest.driver must be a string') + if count is None: + count = 0 + elif not isinstance(count, int): + raise ValueError('DeviceRequest.count must be an integer') + if device_ids is None: + device_ids = [] + elif not isinstance(device_ids, list): + raise ValueError('DeviceRequest.device_ids must be a list') + if capabilities is None: + capabilities = [] + elif not isinstance(capabilities, list): + raise ValueError('DeviceRequest.capabilities must be a list') + if options is None: + options = {} + elif not isinstance(options, dict): + raise ValueError('DeviceRequest.options must be a dict') + + super().__init__({ + 'Driver': driver, + 'Count': count, + 'DeviceIDs': device_ids, + 'Capabilities': capabilities, + 'Options': options + }) + + @property + def driver(self): + return self['Driver'] + + @driver.setter + def driver(self, value): + self['Driver'] = value + + @property + def count(self): + return self['Count'] + + @count.setter + def count(self, value): + self['Count'] = value + + @property + def device_ids(self): + return self['DeviceIDs'] + + @device_ids.setter + def device_ids(self, value): + self['DeviceIDs'] = value + + @property + def capabilities(self): + return self['Capabilities'] + + @capabilities.setter + def capabilities(self, value): + self['Capabilities'] = value + + @property + def options(self): + return self['Options'] + + @options.setter + def options(self, value): + self['Options'] = value + + class HostConfig(dict): def __init__(self, version, binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=False, links=None, @@ -176,7 +272,8 @@ class HostConfig(dict): volume_driver=None, cpu_count=None, cpu_percent=None, nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None, cpu_rt_period=None, cpu_rt_runtime=None, - device_cgroup_rules=None): + device_cgroup_rules=None, device_requests=None, + cgroupns=None): if mem_limit is not None: self['Memory'] = parse_bytes(mem_limit) @@ -199,7 +296,7 @@ class HostConfig(dict): self['MemorySwappiness'] = mem_swappiness if shm_size is not None: - if isinstance(shm_size, six.string_types): + if isinstance(shm_size, str): shm_size = parse_bytes(shm_size) self['ShmSize'] = shm_size @@ -236,10 +333,11 @@ class HostConfig(dict): if dns_search: self['DnsSearch'] = dns_search - if network_mode: - self['NetworkMode'] = network_mode - elif network_mode is None: - self['NetworkMode'] = 'default' + if network_mode == 'host' and port_bindings: + raise host_config_incompatible_error( + 'network_mode', 'host', 'port_bindings' + ) + self['NetworkMode'] = network_mode or 'default' if restart_policy: if not isinstance(restart_policy, dict): @@ -259,7 +357,7 @@ class HostConfig(dict): self['Devices'] = parse_devices(devices) if group_add: - self['GroupAdd'] = [six.text_type(grp) for grp in group_add] + self['GroupAdd'] = [str(grp) for grp in group_add] if dns is not None: self['Dns'] = dns @@ -279,11 +377,11 @@ class HostConfig(dict): if not isinstance(sysctls, dict): raise host_config_type_error('sysctls', sysctls, 'dict') self['Sysctls'] = {} - for k, v in six.iteritems(sysctls): - self['Sysctls'][k] = 
six.text_type(v) + for k, v in sysctls.items(): + self['Sysctls'][k] = str(v) if volumes_from is not None: - if isinstance(volumes_from, six.string_types): + if isinstance(volumes_from, str): volumes_from = volumes_from.split(',') self['VolumesFrom'] = volumes_from @@ -305,7 +403,7 @@ class HostConfig(dict): if isinstance(lxc_conf, dict): formatted = [] - for k, v in six.iteritems(lxc_conf): + for k, v in lxc_conf.items(): formatted.append({'Key': k, 'Value': str(v)}) lxc_conf = formatted @@ -460,7 +558,7 @@ class HostConfig(dict): self["PidsLimit"] = pids_limit if isolation: - if not isinstance(isolation, six.string_types): + if not isinstance(isolation, str): raise host_config_type_error('isolation', isolation, 'string') if version_lt(version, '1.24'): raise host_config_version_error('isolation', '1.24') @@ -510,7 +608,7 @@ class HostConfig(dict): self['CpuPercent'] = cpu_percent if nano_cpus: - if not isinstance(nano_cpus, six.integer_types): + if not isinstance(nano_cpus, int): raise host_config_type_error('nano_cpus', nano_cpus, 'int') if version_lt(version, '1.25'): raise host_config_version_error('nano_cpus', '1.25') @@ -536,6 +634,22 @@ class HostConfig(dict): ) self['DeviceCgroupRules'] = device_cgroup_rules + if device_requests is not None: + if version_lt(version, '1.40'): + raise host_config_version_error('device_requests', '1.40') + if not isinstance(device_requests, list): + raise host_config_type_error( + 'device_requests', device_requests, 'list' + ) + self['DeviceRequests'] = [] + for req in device_requests: + if not isinstance(req, DeviceRequest): + req = DeviceRequest(**req) + self['DeviceRequests'].append(req) + + if cgroupns: + self['CgroupnsMode'] = cgroupns + def host_config_type_error(param, param_value, expected): error_msg = 'Invalid type for {0} param: expected {1} but found {2}' @@ -553,6 +667,13 @@ def host_config_value_error(param, param_value): return ValueError(error_msg.format(param, param_value)) +def host_config_incompatible_error(param, param_value, incompatible_param): + error_msg = '\"{1}\" {0} is incompatible with {2}' + return errors.InvalidArgument( + error_msg.format(param, param_value, incompatible_param) + ) + + class ContainerConfig(dict): def __init__( self, version, image, command, hostname=None, user=None, detach=False, @@ -580,17 +701,17 @@ class ContainerConfig(dict): 'version 1.29' ) - if isinstance(command, six.string_types): + if isinstance(command, str): command = split_command(command) - if isinstance(entrypoint, six.string_types): + if isinstance(entrypoint, str): entrypoint = split_command(entrypoint) if isinstance(environment, dict): environment = format_environment(environment) if isinstance(labels, list): - labels = dict((lbl, six.text_type('')) for lbl in labels) + labels = {lbl: '' for lbl in labels} if isinstance(ports, list): exposed_ports = {} @@ -601,10 +722,10 @@ class ContainerConfig(dict): if len(port_definition) == 2: proto = port_definition[1] port = port_definition[0] - exposed_ports['{0}/{1}'.format(port, proto)] = {} + exposed_ports[f'{port}/{proto}'] = {} ports = exposed_ports - if isinstance(volumes, six.string_types): + if isinstance(volumes, str): volumes = [volumes, ] if isinstance(volumes, list): @@ -633,7 +754,7 @@ class ContainerConfig(dict): 'Hostname': hostname, 'Domainname': domainname, 'ExposedPorts': ports, - 'User': six.text_type(user) if user is not None else None, + 'User': str(user) if user is not None else None, 'Tty': tty, 'OpenStdin': stdin_open, 'StdinOnce': stdin_once, diff --git 
a/docker/types/daemon.py b/docker/types/daemon.py index af3e5bc..10e8101 100644 --- a/docker/types/daemon.py +++ b/docker/types/daemon.py @@ -8,7 +8,7 @@ except ImportError: from ..errors import DockerException -class CancellableStream(object): +class CancellableStream: """ Stream wrapper for real-time events, logs, etc. from the server. @@ -32,7 +32,7 @@ class CancellableStream(object): return next(self._stream) except urllib3.exceptions.ProtocolError: raise StopIteration - except socket.error: + except OSError: raise StopIteration next = __next__ diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py index 9815018..dfc88a9 100644 --- a/docker/types/healthcheck.py +++ b/docker/types/healthcheck.py @@ -1,7 +1,5 @@ from .base import DictType -import six - class Healthcheck(DictType): """ @@ -31,7 +29,7 @@ class Healthcheck(DictType): """ def __init__(self, **kwargs): test = kwargs.get('test', kwargs.get('Test')) - if isinstance(test, six.string_types): + if isinstance(test, str): test = ["CMD-SHELL", test] interval = kwargs.get('interval', kwargs.get('Interval')) @@ -39,7 +37,7 @@ class Healthcheck(DictType): retries = kwargs.get('retries', kwargs.get('Retries')) start_period = kwargs.get('start_period', kwargs.get('StartPeriod')) - super(Healthcheck, self).__init__({ + super().__init__({ 'Test': test, 'Interval': interval, 'Timeout': timeout, @@ -53,7 +51,7 @@ class Healthcheck(DictType): @test.setter def test(self, value): - if isinstance(value, six.string_types): + if isinstance(value, str): value = ["CMD-SHELL", value] self['Test'] = value diff --git a/docker/types/networks.py b/docker/types/networks.py index 442adb1..ed1ced1 100644 --- a/docker/types/networks.py +++ b/docker/types/networks.py @@ -4,7 +4,8 @@ from ..utils import normalize_links, version_lt class EndpointConfig(dict): def __init__(self, version, aliases=None, links=None, ipv4_address=None, - ipv6_address=None, link_local_ips=None, mac_address=None): + ipv6_address=None, link_local_ips=None, driver_opt=None, + mac_address=None): if version_lt(version, '1.22'): raise errors.InvalidVersion( 'Endpoint config is not supported for API version < 1.22' @@ -40,6 +41,15 @@ class EndpointConfig(dict): if ipam_config: self['IPAMConfig'] = ipam_config + if driver_opt: + if version_lt(version, '1.32'): + raise errors.InvalidVersion( + 'DriverOpts is not supported for API version < 1.32' + ) + if not isinstance(driver_opt, dict): + raise TypeError('driver_opt must be a dictionary') + self['DriverOpts'] = driver_opt + class NetworkingConfig(dict): def __init__(self, endpoints_config=None): diff --git a/docker/types/services.py b/docker/types/services.py index 05dda15..a3383ef 100644 --- a/docker/types/services.py +++ b/docker/types/services.py @@ -1,5 +1,3 @@ -import six - from .. import errors from ..constants import IS_WINDOWS_PLATFORM from ..utils import ( @@ -31,6 +29,7 @@ class TaskTemplate(dict): force_update (int): A counter that triggers an update even if no relevant parameters have been changed. """ + def __init__(self, container_spec, resources=None, restart_policy=None, placement=None, log_driver=None, networks=None, force_update=None): @@ -112,16 +111,24 @@ class ContainerSpec(dict): containers. Only used for Windows containers. init (boolean): Run an init inside the container that forwards signals and reaps processes. + cap_add (:py:class:`list`): A list of kernel capabilities to add to the + default set for the container. 
+ cap_drop (:py:class:`list`): A list of kernel capabilities to drop from + the default set for the container. + sysctls (:py:class:`dict`): A dict of sysctl values to add to + the container """ + def __init__(self, image, command=None, args=None, hostname=None, env=None, workdir=None, user=None, labels=None, mounts=None, stop_grace_period=None, secrets=None, tty=None, groups=None, open_stdin=None, read_only=None, stop_signal=None, healthcheck=None, hosts=None, dns_config=None, configs=None, - privileges=None, isolation=None, init=None): + privileges=None, isolation=None, init=None, cap_add=None, + cap_drop=None, sysctls=None): self['Image'] = image - if isinstance(command, six.string_types): + if isinstance(command, str): command = split_command(command) self['Command'] = command self['Args'] = args @@ -151,7 +158,7 @@ class ContainerSpec(dict): if mounts is not None: parsed_mounts = [] for mount in mounts: - if isinstance(mount, six.string_types): + if isinstance(mount, str): parsed_mounts.append(Mount.parse_mount_string(mount)) else: # If mount already parsed @@ -188,6 +195,24 @@ class ContainerSpec(dict): if init is not None: self['Init'] = init + if cap_add is not None: + if not isinstance(cap_add, list): + raise TypeError('cap_add must be a list') + + self['CapabilityAdd'] = cap_add + + if cap_drop is not None: + if not isinstance(cap_drop, list): + raise TypeError('cap_drop must be a list') + + self['CapabilityDrop'] = cap_drop + + if sysctls is not None: + if not isinstance(sysctls, dict): + raise TypeError('sysctls must be a dict') + + self['Sysctls'] = sysctls + class Mount(dict): """ @@ -216,6 +241,7 @@ class Mount(dict): tmpfs_size (int or string): The size for the tmpfs mount in bytes. tmpfs_mode (int): The permission mode for the tmpfs mount. """ + def __init__(self, target, source, type='volume', read_only=False, consistency=None, propagation=None, no_copy=False, labels=None, driver_config=None, tmpfs_size=None, @@ -224,7 +250,7 @@ class Mount(dict): self['Source'] = source if type not in ('bind', 'volume', 'tmpfs', 'npipe'): raise errors.InvalidArgument( - 'Unsupported mount type: "{}"'.format(type) + f'Unsupported mount type: "{type}"' ) self['Type'] = type self['ReadOnly'] = read_only @@ -260,7 +286,7 @@ class Mount(dict): elif type == 'tmpfs': tmpfs_opts = {} if tmpfs_mode: - if not isinstance(tmpfs_mode, six.integer_types): + if not isinstance(tmpfs_mode, int): raise errors.InvalidArgument( 'tmpfs_mode must be an integer' ) @@ -280,7 +306,7 @@ class Mount(dict): parts = string.split(':') if len(parts) > 3: raise errors.InvalidArgument( - 'Invalid mount format "{0}"'.format(string) + f'Invalid mount format "{string}"' ) if len(parts) == 1: return cls(target=parts[0], source=None) @@ -316,6 +342,7 @@ class Resources(dict): ``{ resource_name: resource_value }``. Alternatively, a list of of resource specifications as defined by the Engine API. """ + def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, mem_reservation=None, generic_resources=None): limits = {} @@ -347,7 +374,7 @@ def _convert_generic_resources_dict(generic_resources): ' (found {})'.format(type(generic_resources)) ) resources = [] - for kind, value in six.iteritems(generic_resources): + for kind, value in generic_resources.items(): resource_type = None if isinstance(value, int): resource_type = 'DiscreteResourceSpec' @@ -384,8 +411,9 @@ class UpdateConfig(dict): an update before the failure action is invoked, specified as a floating point number between 0 and 1. 
Default: 0 order (string): Specifies the order of operations when rolling out an - updated task. Either ``start_first`` or ``stop_first`` are accepted. + updated task. Either ``start-first`` or ``stop-first`` are accepted. """ + def __init__(self, parallelism=0, delay=None, failure_action='continue', monitor=None, max_failure_ratio=None, order=None): self['Parallelism'] = parallelism @@ -421,7 +449,8 @@ class UpdateConfig(dict): class RollbackConfig(UpdateConfig): """ - Used to specify the way containe rollbacks should be performed by a service + Used to specify the way container rollbacks should be performed by a + service Args: parallelism (int): Maximum number of tasks to be rolled back in one @@ -437,13 +466,13 @@ class RollbackConfig(UpdateConfig): a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. Default: 0 order (string): Specifies the order of operations when rolling out a - rolled back task. Either ``start_first`` or ``stop_first`` are + rolled back task. Either ``start-first`` or ``stop-first`` are accepted. """ pass -class RestartConditionTypesEnum(object): +class RestartConditionTypesEnum: _values = ( 'none', 'on-failure', @@ -474,7 +503,7 @@ class RestartPolicy(dict): max_attempts=0, window=0): if condition not in self.condition_types._values: raise TypeError( - 'Invalid RestartPolicy condition {0}'.format(condition) + f'Invalid RestartPolicy condition {condition}' ) self['Condition'] = condition @@ -496,6 +525,7 @@ class DriverConfig(dict): name (string): Name of the driver to use. options (dict): Driver-specific options. Default: ``None``. """ + def __init__(self, name, options=None): self['Name'] = name if options: @@ -517,6 +547,7 @@ class EndpointSpec(dict): is ``(target_port [, protocol [, publish_mode]])``. Ports can only be provided if the ``vip`` resolution mode is used. """ + def __init__(self, mode=None, ports=None): if ports: self['Ports'] = convert_service_ports(ports) @@ -533,7 +564,7 @@ def convert_service_ports(ports): ) result = [] - for k, v in six.iteritems(ports): + for k, v in ports.items(): port_spec = { 'Protocol': 'tcp', 'PublishedPort': k @@ -559,37 +590,70 @@ def convert_service_ports(ports): class ServiceMode(dict): """ - Indicate whether a service should be deployed as a replicated or global - service, and associated parameters + Indicate whether a service or a job should be deployed as a replicated + or global service, and associated parameters Args: - mode (string): Can be either ``replicated`` or ``global`` + mode (string): Can be either ``replicated``, ``global``, + ``replicated-job`` or ``global-job`` replicas (int): Number of replicas. For replicated services only. + concurrency (int): Number of concurrent jobs. For replicated job + services only. 
""" - def __init__(self, mode, replicas=None): - if mode not in ('replicated', 'global'): - raise errors.InvalidArgument( - 'mode must be either "replicated" or "global"' - ) - if mode != 'replicated' and replicas is not None: + + def __init__(self, mode, replicas=None, concurrency=None): + replicated_modes = ('replicated', 'replicated-job') + supported_modes = replicated_modes + ('global', 'global-job') + + if mode not in supported_modes: raise errors.InvalidArgument( - 'replicas can only be used for replicated mode' + 'mode must be either "replicated", "global", "replicated-job"' + ' or "global-job"' ) - self[mode] = {} + + if mode not in replicated_modes: + if replicas is not None: + raise errors.InvalidArgument( + 'replicas can only be used for "replicated" or' + ' "replicated-job" mode' + ) + + if concurrency is not None: + raise errors.InvalidArgument( + 'concurrency can only be used for "replicated-job" mode' + ) + + service_mode = self._convert_mode(mode) + self.mode = service_mode + self[service_mode] = {} + if replicas is not None: - self[mode]['Replicas'] = replicas + if mode == 'replicated': + self[service_mode]['Replicas'] = replicas - @property - def mode(self): - if 'global' in self: - return 'global' - return 'replicated' + if mode == 'replicated-job': + self[service_mode]['MaxConcurrent'] = concurrency or 1 + self[service_mode]['TotalCompletions'] = replicas + + @staticmethod + def _convert_mode(original_mode): + if original_mode == 'global-job': + return 'GlobalJob' + + if original_mode == 'replicated-job': + return 'ReplicatedJob' + + return original_mode @property def replicas(self): - if self.mode != 'replicated': - return None - return self['replicated'].get('Replicas') + if 'replicated' in self: + return self['replicated'].get('Replicas') + + if 'ReplicatedJob' in self: + return self['ReplicatedJob'].get('TotalCompletions') + + return None class SecretReference(dict): @@ -659,10 +723,13 @@ class Placement(dict): are provided in order from highest to lowest precedence and are expressed as ``(strategy, descriptor)`` tuples. See :py:class:`PlacementPreference` for details. + maxreplicas (int): Maximum number of replicas per node platforms (:py:class:`list` of tuple): A list of platforms expressed as ``(arch, os)`` tuples """ - def __init__(self, constraints=None, preferences=None, platforms=None): + + def __init__(self, constraints=None, preferences=None, platforms=None, + maxreplicas=None): if constraints is not None: self['Constraints'] = constraints if preferences is not None: @@ -671,6 +738,8 @@ class Placement(dict): if isinstance(pref, tuple): pref = PlacementPreference(*pref) self['Preferences'].append(pref) + if maxreplicas is not None: + self['MaxReplicas'] = maxreplicas if platforms: self['Platforms'] = [] for plat in platforms: @@ -691,6 +760,7 @@ class PlacementPreference(dict): the scheduler will try to spread tasks evenly over groups of nodes identified by this label. """ + def __init__(self, strategy, descriptor): if strategy != 'spread': raise errors.InvalidArgument( @@ -712,6 +782,7 @@ class DNSConfig(dict): options (:py:class:`list`): A list of internal resolver variables to be modified (e.g., ``debug``, ``ndots:3``, etc.). 
""" + def __init__(self, nameservers=None, search=None, options=None): self['Nameservers'] = nameservers self['Search'] = search @@ -742,6 +813,7 @@ class Privileges(dict): selinux_type (string): SELinux type label selinux_level (string): SELinux level label """ + def __init__(self, credentialspec_file=None, credentialspec_registry=None, selinux_disable=None, selinux_user=None, selinux_role=None, selinux_type=None, selinux_level=None): @@ -784,6 +856,7 @@ class NetworkAttachmentConfig(dict): options (:py:class:`dict`): Driver attachment options for the network target. """ + def __init__(self, target, aliases=None, options=None): self['Target'] = target self['Aliases'] = aliases diff --git a/docker/utils/build.py b/docker/utils/build.py index 4fa5751..59564c4 100644 --- a/docker/utils/build.py +++ b/docker/utils/build.py @@ -4,8 +4,6 @@ import re import tarfile import tempfile -import six - from .fnmatch import fnmatch from ..constants import IS_WINDOWS_PLATFORM @@ -69,7 +67,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False, t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj) if files is None: files = build_file_list(root) - extra_names = set(e[0] for e in extra_files) + extra_names = {e[0] for e in extra_files} for path in files: if path in extra_names: # Extra files override context files with the same name @@ -95,9 +93,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False, try: with open(full_path, 'rb') as f: t.addfile(i, f) - except IOError: - raise IOError( - 'Can not read file in context: {}'.format(full_path) + except OSError: + raise OSError( + f'Can not read file in context: {full_path}' ) else: # Directories, FIFOs, symlinks... don't need to be read. @@ -105,8 +103,9 @@ def create_archive(root, files=None, fileobj=None, gzip=False, for name, contents in extra_files: info = tarfile.TarInfo(name) - info.size = len(contents) - t.addfile(info, io.BytesIO(contents.encode('utf-8'))) + contents_encoded = contents.encode('utf-8') + info.size = len(contents_encoded) + t.addfile(info, io.BytesIO(contents_encoded)) t.close() fileobj.seek(0) @@ -118,12 +117,8 @@ def mkbuildcontext(dockerfile): t = tarfile.open(mode='w', fileobj=f) if isinstance(dockerfile, io.StringIO): dfinfo = tarfile.TarInfo('Dockerfile') - if six.PY3: - raise TypeError('Please use io.BytesIO to create in-memory ' - 'Dockerfiles with Python 3') - else: - dfinfo.size = len(dockerfile.getvalue()) - dockerfile.seek(0) + raise TypeError('Please use io.BytesIO to create in-memory ' + 'Dockerfiles with Python 3') elif isinstance(dockerfile, io.BytesIO): dfinfo = tarfile.TarInfo('Dockerfile') dfinfo.size = len(dockerfile.getvalue()) @@ -153,7 +148,7 @@ def walk(root, patterns, default=True): # Heavily based on # https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go -class PatternMatcher(object): +class PatternMatcher: def __init__(self, patterns): self.patterns = list(filter( lambda p: p.dirs, [Pattern(p) for p in patterns] @@ -211,13 +206,12 @@ class PatternMatcher(object): break if skip: continue - for sub in rec_walk(cur): - yield sub + yield from rec_walk(cur) return rec_walk(root) -class Pattern(object): +class Pattern: def __init__(self, pattern_str): self.exclusion = False if pattern_str.startswith('!'): @@ -230,6 +224,9 @@ class Pattern(object): @classmethod def normalize(cls, p): + # Remove trailing spaces + p = p.strip() + # Leading and trailing slashes are not relevant. Yes, # "foo.py/" must exclude the "foo.py" regular file. "." 
# components are not relevant either, even if the whole diff --git a/docker/utils/config.py b/docker/utils/config.py index 82a0e2a..8e24959 100644 --- a/docker/utils/config.py +++ b/docker/utils/config.py @@ -18,11 +18,11 @@ def find_config_file(config_path=None): os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4 ])) - log.debug("Trying paths: {0}".format(repr(paths))) + log.debug(f"Trying paths: {repr(paths)}") for path in paths: if os.path.exists(path): - log.debug("Found file at path: {0}".format(path)) + log.debug(f"Found file at path: {path}") return path log.debug("No config file found") @@ -57,7 +57,7 @@ def load_general_config(config_path=None): try: with open(config_file) as f: return json.load(f) - except (IOError, ValueError) as e: + except (OSError, ValueError) as e: # In the case of a legacy `.dockercfg` file, we won't # be able to load any JSON data. log.debug(e) diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py index c975d4b..cf1baf4 100644 --- a/docker/utils/decorators.py +++ b/docker/utils/decorators.py @@ -27,7 +27,7 @@ def minimum_version(version): def wrapper(self, *args, **kwargs): if utils.version_lt(self._version, version): raise errors.InvalidVersion( - '{0} is not available for version < {1}'.format( + '{} is not available for version < {}'.format( f.__name__, version ) ) diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py index cc940a2..90e9f60 100644 --- a/docker/utils/fnmatch.py +++ b/docker/utils/fnmatch.py @@ -108,7 +108,7 @@ def translate(pat): stuff = '^' + stuff[1:] elif stuff[0] == '^': stuff = '\\' + stuff - res = '%s[%s]' % (res, stuff) + res = f'{res}[{stuff}]' else: res = res + re.escape(c) diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py index addffdf..f384175 100644 --- a/docker/utils/json_stream.py +++ b/docker/utils/json_stream.py @@ -1,11 +1,6 @@ -from __future__ import absolute_import -from __future__ import unicode_literals - import json import json.decoder -import six - from ..errors import StreamParseError @@ -20,7 +15,7 @@ def stream_as_text(stream): instead of byte streams. """ for data in stream: - if not isinstance(data, six.text_type): + if not isinstance(data, str): data = data.decode('utf-8', 'replace') yield data @@ -46,8 +41,8 @@ def json_stream(stream): return split_buffer(stream, json_splitter, json_decoder.decode) -def line_splitter(buffer, separator=u'\n'): - index = buffer.find(six.text_type(separator)) +def line_splitter(buffer, separator='\n'): + index = buffer.find(str(separator)) if index == -1: return None return buffer[:index + 1], buffer[index + 1:] @@ -61,7 +56,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a): of the input. """ splitter = splitter or line_splitter - buffered = six.text_type('') + buffered = '' for data in stream_as_text(stream): buffered += data diff --git a/docker/utils/ports.py b/docker/utils/ports.py index a50cc02..e813936 100644 --- a/docker/utils/ports.py +++ b/docker/utils/ports.py @@ -3,7 +3,7 @@ import re PORT_SPEC = re.compile( "^" # Match full string "(" # External part - r"((?P<host>[a-fA-F\d.:]+):)?" # Address + r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range ")?" r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" 
# Internal range @@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False): if not end: return [start + proto] if randomly_available_port: - return ['{}-{}'.format(start, end) + proto] + return [f'{start}-{end}' + proto] return [str(port) + proto for port in range(int(start), int(end) + 1)] diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 7ba9505..4a2076e 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -4,8 +4,6 @@ import select import socket as pysocket import struct -import six - try: from ..transport import NpipeSocket except ImportError: @@ -27,16 +25,16 @@ def read(socket, n=4096): recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) - if six.PY3 and not isinstance(socket, NpipeSocket): + if not isinstance(socket, NpipeSocket): select.select([socket], [], []) try: if hasattr(socket, 'recv'): return socket.recv(n) - if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')): + if isinstance(socket, getattr(pysocket, 'SocketIO')): return socket.read(n) return os.read(socket.fileno(), n) - except EnvironmentError as e: + except OSError as e: if e.errno not in recoverable_errors: raise @@ -46,7 +44,7 @@ def read_exactly(socket, n): Reads exactly n bytes from socket Raises SocketError if there isn't enough data """ - data = six.binary_type() + data = bytes() while len(data) < n: next_data = read(socket, n - len(data)) if not next_data: @@ -134,7 +132,7 @@ def consume_socket_output(frames, demux=False): if demux is False: # If the streams are multiplexed, the generator returns strings, that # we just need to concatenate. - return six.binary_type().join(frames) + return bytes().join(frames) # If the streams are demultiplexed, the generator yields tuples # (stdout, stderr) @@ -166,4 +164,4 @@ def demux_adaptor(stream_id, data): elif stream_id == STDERR: return (None, data) else: - raise ValueError('{0} is not a valid stream'.format(stream_id)) + raise ValueError(f'{stream_id} is not a valid stream') diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 7819ace..7b2bbf4 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -1,33 +1,27 @@ import base64 +import collections import json import os import os.path import shlex import string from datetime import datetime -from distutils.version import StrictVersion - -import six +from packaging.version import Version from .. import errors -from .. 
import tls +from ..constants import DEFAULT_HTTP_HOST +from ..constants import DEFAULT_UNIX_SOCKET +from ..constants import DEFAULT_NPIPE +from ..constants import BYTE_UNITS +from ..tls import TLSConfig -if six.PY2: - from urllib import splitnport - from urlparse import urlparse -else: - from urllib.parse import splitnport, urlparse +from urllib.parse import urlparse, urlunparse -DEFAULT_HTTP_HOST = "127.0.0.1" -DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock" -DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' -BYTE_UNITS = { - 'b': 1, - 'k': 1024, - 'm': 1024 * 1024, - 'g': 1024 * 1024 * 1024 -} +URLComponents = collections.namedtuple( + 'URLComponents', + 'scheme netloc url params query fragment', +) def create_ipam_pool(*args, **kwargs): @@ -46,8 +40,7 @@ def create_ipam_config(*args, **kwargs): def decode_json_header(header): data = base64.b64decode(header) - if six.PY3: - data = data.decode('utf-8') + data = data.decode('utf-8') return json.loads(data) @@ -63,8 +56,8 @@ def compare_version(v1, v2): >>> compare_version(v2, v2) 0 """ - s1 = StrictVersion(v1) - s2 = StrictVersion(v2) + s1 = Version(v1) + s2 = Version(v2) if s1 == s2: return 0 elif s1 > s2: @@ -87,7 +80,7 @@ def _convert_port_binding(binding): if len(binding) == 2: result['HostPort'] = binding[1] result['HostIp'] = binding[0] - elif isinstance(binding[0], six.string_types): + elif isinstance(binding[0], str): result['HostIp'] = binding[0] else: result['HostPort'] = binding[0] @@ -111,7 +104,7 @@ def _convert_port_binding(binding): def convert_port_bindings(port_bindings): result = {} - for k, v in six.iteritems(port_bindings): + for k, v in iter(port_bindings.items()): key = str(k) if '/' not in key: key += '/tcp' @@ -128,7 +121,7 @@ def convert_volume_binds(binds): result = [] for k, v in binds.items(): - if isinstance(k, six.binary_type): + if isinstance(k, bytes): k = k.decode('utf-8') if isinstance(v, dict): @@ -139,7 +132,7 @@ def convert_volume_binds(binds): ) bind = v['bind'] - if isinstance(bind, six.binary_type): + if isinstance(bind, bytes): bind = bind.decode('utf-8') if 'ro' in v: @@ -150,13 +143,13 @@ def convert_volume_binds(binds): mode = 'rw' result.append( - six.text_type('{0}:{1}:{2}').format(k, bind, mode) + f'{k}:{bind}:{mode}' ) else: - if isinstance(v, six.binary_type): + if isinstance(v, bytes): v = v.decode('utf-8') result.append( - six.text_type('{0}:{1}:rw').format(k, v) + f'{k}:{v}:rw' ) return result @@ -173,7 +166,7 @@ def convert_tmpfs_mounts(tmpfs): result = {} for mount in tmpfs: - if isinstance(mount, six.string_types): + if isinstance(mount, str): if ":" in mount: name, options = mount.split(":", 1) else: @@ -198,7 +191,7 @@ def convert_service_networks(networks): result = [] for n in networks: - if isinstance(n, six.string_types): + if isinstance(n, str): n = {'Target': n} result.append(n) return result @@ -215,10 +208,6 @@ def parse_repository_tag(repo_name): def parse_host(addr, is_win32=False, tls=False): - path = '' - port = None - host = None - # Sensible defaults if not addr and is_win32: return DEFAULT_NPIPE @@ -247,14 +236,14 @@ def parse_host(addr, is_win32=False, tls=False): if proto not in ('tcp', 'unix', 'npipe', 'ssh'): raise errors.DockerException( - "Invalid bind address protocol: {}".format(addr) + f"Invalid bind address protocol: {addr}" ) if proto == 'tcp' and not parsed_url.netloc: # "tcp://" is exceptionally disallowed by convention; # omitting a hostname for other protocols is fine raise errors.DockerException( - 'Invalid bind address format: {}'.format(addr) 
+ f'Invalid bind address format: {addr}' ) if any([ @@ -262,7 +251,7 @@ def parse_host(addr, is_win32=False, tls=False): parsed_url.password ]): raise errors.DockerException( - 'Invalid bind address format: {}'.format(addr) + f'Invalid bind address format: {addr}' ) if parsed_url.path and proto == 'ssh': @@ -277,20 +266,20 @@ def parse_host(addr, is_win32=False, tls=False): # to be valid and equivalent to unix:///path path = '/'.join((parsed_url.hostname, path)) + netloc = parsed_url.netloc if proto in ('tcp', 'ssh'): - # parsed_url.hostname strips brackets from IPv6 addresses, - # which can be problematic hence our use of splitnport() instead. - host, port = splitnport(parsed_url.netloc) - if port is None or port < 0: + port = parsed_url.port or 0 + if port <= 0: if proto != 'ssh': raise errors.DockerException( 'Invalid bind address format: port is required:' ' {}'.format(addr) ) port = 22 + netloc = f'{parsed_url.netloc}:{port}' - if not host: - host = DEFAULT_HTTP_HOST + if not parsed_url.hostname: + netloc = f'{DEFAULT_HTTP_HOST}:{port}' # Rewrite schemes to fit library internals (requests adapters) if proto == 'tcp': @@ -299,8 +288,16 @@ def parse_host(addr, is_win32=False, tls=False): proto = 'http+unix' if proto in ('http+unix', 'npipe'): - return "{}://{}".format(proto, path).rstrip('/') - return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/') + return f"{proto}://{path}".rstrip('/') + + return urlunparse(URLComponents( + scheme=proto, + netloc=netloc, + url=path, + params='', + query='', + fragment='', + )).rstrip('/') def parse_devices(devices): @@ -309,9 +306,9 @@ def parse_devices(devices): if isinstance(device, dict): device_list.append(device) continue - if not isinstance(device, six.string_types): + if not isinstance(device, str): raise errors.DockerException( - 'Invalid device type {0}'.format(type(device)) + f'Invalid device type {type(device)}' ) device_mapping = device.split(':') if device_mapping: @@ -365,7 +362,7 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None): # so if it's not set already then set it to false. 
assert_hostname = False - params['tls'] = tls.TLSConfig( + params['tls'] = TLSConfig( client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')), ca_cert=os.path.join(cert_path, 'ca.pem'), @@ -379,13 +376,13 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None): def convert_filters(filters): result = {} - for k, v in six.iteritems(filters): + for k, v in iter(filters.items()): if isinstance(v, bool): v = 'true' if v else 'false' if not isinstance(v, list): v = [v, ] result[k] = [ - str(item) if not isinstance(item, six.string_types) else item + str(item) if not isinstance(item, str) else item for item in v ] return json.dumps(result) @@ -398,7 +395,7 @@ def datetime_to_timestamp(dt): def parse_bytes(s): - if isinstance(s, six.integer_types + (float,)): + if isinstance(s, (int, float,)): return s if len(s) == 0: return 0 @@ -419,10 +416,10 @@ def parse_bytes(s): if suffix in units.keys() or suffix.isdigit(): try: - digits = int(digits_part) + digits = float(digits_part) except ValueError: raise errors.DockerException( - 'Failed converting the string value for memory ({0}) to' + 'Failed converting the string value for memory ({}) to' ' an integer.'.format(digits_part) ) @@ -430,7 +427,7 @@ def parse_bytes(s): s = int(digits * units[suffix]) else: raise errors.DockerException( - 'The specified value for memory ({0}) should specify the' + 'The specified value for memory ({}) should specify the' ' units. The postfix should be one of the `b` `k` `m` `g`' ' characters'.format(s) ) @@ -440,9 +437,9 @@ def parse_bytes(s): def normalize_links(links): if isinstance(links, dict): - links = six.iteritems(links) + links = iter(links.items()) - return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)] + return [f'{k}:{v}' if v else k for k, v in sorted(links)] def parse_env_file(env_file): @@ -452,7 +449,7 @@ def parse_env_file(env_file): """ environment = {} - with open(env_file, 'r') as f: + with open(env_file) as f: for line in f: if line[0] == '#': @@ -468,15 +465,13 @@ def parse_env_file(env_file): environment[k] = v else: raise errors.DockerException( - 'Invalid line in environment file {0}:\n{1}'.format( + 'Invalid line in environment file {}:\n{}'.format( env_file, line)) return environment def split_command(command): - if six.PY2 and not isinstance(command, six.binary_type): - command = command.encode('utf-8') return shlex.split(command) @@ -484,22 +479,22 @@ def format_environment(environment): def format_env(key, value): if value is None: return key - if isinstance(value, six.binary_type): + if isinstance(value, bytes): value = value.decode('utf-8') - return u'{key}={value}'.format(key=key, value=value) - return [format_env(*var) for var in six.iteritems(environment)] + return f'{key}={value}' + return [format_env(*var) for var in iter(environment.items())] def format_extra_hosts(extra_hosts, task=False): # Use format dictated by Swarm API if container is part of a task if task: return [ - '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts)) + f'{v} {k}' for k, v in sorted(iter(extra_hosts.items())) ] return [ - '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts)) + f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items())) ] diff --git a/docker/version.py b/docker/version.py index 2124925..44eac8c 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,14 @@ -version = "4.1.0-dev" -version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) +try: + from ._version import 
__version__ +except ImportError: + try: + # importlib.metadata available in Python 3.8+, the fallback (0.0.0) + # is fine because release builds use _version (above) rather than + # this code path, so it only impacts developing w/ 3.7 + from importlib.metadata import version, PackageNotFoundError + try: + __version__ = version('docker') + except PackageNotFoundError: + __version__ = '0.0.0' + except ImportError: + __version__ = '0.0.0'
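
The types and helpers touched by this change set compose as sketched below. This is a minimal, illustrative sketch only: it assumes a docker-py build that already contains these changes (including the package-level exports added elsewhere in the diff), a daemon speaking API 1.40+ for device requests and the cgroup namespace mode, and API 1.41+ for job-mode swarm services. The image name, driver name, capability names, sysctl values, and driver_opt key are placeholders, not values taken from the diff.

import docker
from docker.types import (
    ContainerSpec, DeviceRequest, EndpointConfig, Placement, ServiceMode,
)
from docker.utils import parse_bytes, split_port

# Device reservations (e.g. GPUs) ride on the new HostConfig 'DeviceRequests'
# field; 'capabilities' is a list of lists of driver capability strings.
gpu = DeviceRequest(driver='nvidia', count=1, capabilities=[['gpu']])

# create_host_config() forwards keyword arguments into HostConfig, so the new
# device_requests and cgroupns parameters can be exercised without a daemon.
api = docker.APIClient(version='1.41')
host_config = api.create_host_config(
    device_requests=[gpu],
    cgroupns='private',   # 'host' or 'private'
)
# Note: network_mode='host' combined with port_bindings now raises
# errors.InvalidArgument instead of being passed through to the daemon.

# Swarm-side additions: per-task capabilities and sysctls on ContainerSpec,
# job-style service modes, and a per-node replica cap on Placement.
spec = ContainerSpec(
    image='busybox',
    command='sleep 60',
    cap_add=['CAP_NET_ADMIN'],
    cap_drop=['CAP_CHOWN'],
    sysctls={'net.core.somaxconn': '1024'},
)
mode = ServiceMode('replicated-job', replicas=10, concurrency=2)
print(mode)           # {'ReplicatedJob': {'MaxConcurrent': 2, 'TotalCompletions': 10}}
print(mode.replicas)  # 10
placement = Placement(maxreplicas=1)

# Per-endpoint driver options are accepted from API 1.32 onward.
endpoint = EndpointConfig(
    version='1.32', driver_opt={'com.docker.network.example': 'yes'},
)

# Utility changes: fractional byte sizes now parse, and bracketed IPv6 host
# addresses are accepted in port specifications.
parse_bytes('1.5g')
split_port('[2001:db8::1]:8080:80')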