add docker examples
@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import traceback

from ansible.module_utils.six import PY2


REQUESTS_IMPORT_ERROR = None
URLLIB3_IMPORT_ERROR = None
BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = None


try:
    from requests import Session  # noqa: F401, pylint: disable=unused-import
    from requests.adapters import HTTPAdapter  # noqa: F401, pylint: disable=unused-import
    from requests.exceptions import HTTPError, InvalidSchema  # noqa: F401, pylint: disable=unused-import
except ImportError:
    REQUESTS_IMPORT_ERROR = traceback.format_exc()

    class Session(object):
        __attrs__ = []

    class HTTPAdapter(object):
        __attrs__ = []

    class HTTPError(Exception):
        pass

    class InvalidSchema(Exception):
        pass


try:
    from requests.packages import urllib3
    from requests.packages.urllib3 import connection as urllib3_connection  # pylint: disable=unused-import
except ImportError:
    try:
        import urllib3
        from urllib3 import connection as urllib3_connection  # pylint: disable=unused-import
    except ImportError:
        URLLIB3_IMPORT_ERROR = traceback.format_exc()

        class _HTTPConnectionPool(object):
            pass

        class _HTTPConnection(object):
            pass

        class FakeURLLIB3(object):
            def __init__(self):
                self._collections = self
                self.poolmanager = self
                self.connection = self
                self.connectionpool = self

                self.RecentlyUsedContainer = object()
                self.PoolManager = object()
                self.match_hostname = object()
                self.HTTPConnectionPool = _HTTPConnectionPool

        class FakeURLLIB3Connection(object):
            def __init__(self):
                self.HTTPConnection = _HTTPConnection

        urllib3 = FakeURLLIB3()
        urllib3_connection = FakeURLLIB3Connection()


# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if PY2:
    try:
        from backports.ssl_match_hostname import match_hostname
        urllib3.connection.match_hostname = match_hostname
    except ImportError:
        BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = traceback.format_exc()


def fail_on_missing_imports():
    if REQUESTS_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install requests',
            'requests', REQUESTS_IMPORT_ERROR)
    if URLLIB3_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install urllib3',
            'urllib3', URLLIB3_IMPORT_ERROR)
    if BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install backports.ssl-match-hostname',
            'backports.ssl-match-hostname', BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR)
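
# Usage sketch (illustrative, not part of the vendored module): callers are
# expected to invoke fail_on_missing_imports() before using the imports above,
# so a missing dependency surfaces as MissingRequirementException rather than
# an obscure error from the placeholder classes.
#
#     from ._import_helper import fail_on_missing_imports, Session
#
#     fail_on_missing_imports()  # raises if requests or urllib3 are absent
#     session = Session()        # from here on this is the real requests.Session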

@@ -0,0 +1,613 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import logging
import struct
from functools import partial

from ansible.module_utils.six import PY3, binary_type, iteritems, string_types, raise_from
from ansible.module_utils.six.moves.urllib.parse import quote

from .. import auth
from .._import_helper import fail_on_missing_imports
from .._import_helper import HTTPError as _HTTPError
from .._import_helper import InvalidSchema as _InvalidSchema
from .._import_helper import Session as _Session
from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
                         DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
                         DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
                         MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES,
                         DEFAULT_DATA_CHUNK_SIZE)
from ..errors import (DockerException, InvalidVersion, TLSParameterError, MissingRequirementException,
                      create_api_error_from_http_exception)
from ..tls import TLSConfig
from ..transport.npipeconn import NpipeHTTPAdapter
from ..transport.npipesocket import PYWIN32_IMPORT_ERROR
from ..transport.unixconn import UnixHTTPAdapter
from ..transport.sshconn import SSHHTTPAdapter, PARAMIKO_IMPORT_ERROR
from ..transport.ssladapter import SSLHTTPAdapter
from ..utils import config, utils, json_stream
from ..utils.decorators import check_resource, update_headers
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter

from .daemon import DaemonApiMixin


log = logging.getLogger(__name__)


class APIClient(
        _Session,
        DaemonApiMixin):
    """
    A low-level client for the Docker Engine API.

    Example:

        >>> import docker
        >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
        >>> client.version()
        {u'ApiVersion': u'1.33',
         u'Arch': u'amd64',
         u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
         u'GitCommit': u'f4ffd2511c',
         u'GoVersion': u'go1.9.2',
         u'KernelVersion': u'4.14.3-1-ARCH',
         u'MinAPIVersion': u'1.12',
         u'Os': u'linux',
         u'Version': u'17.10.0-ce'}

    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """

    __attrs__ = _Session.__attrs__ + ['_auth_configs',
                                      '_general_configs',
                                      '_version',
                                      'base_url',
                                      'timeout']

    def __init__(self, base_url=None, version=None,
                 timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
                 user_agent=DEFAULT_USER_AGENT, num_pools=None,
                 credstore_env=None, use_ssh_client=False,
                 max_pool_size=DEFAULT_MAX_POOL_SIZE):
        super(APIClient, self).__init__()

        fail_on_missing_imports()

        if tls and not base_url:
            raise TLSParameterError(
                'If using TLS, the base_url argument must be provided.'
            )

        self.base_url = base_url
        self.timeout = timeout
        self.headers['User-Agent'] = user_agent

        self._general_configs = config.load_general_config()

        proxy_config = self._general_configs.get('proxies', {})
        try:
            proxies = proxy_config[base_url]
        except KeyError:
            proxies = proxy_config.get('default', {})

        self._proxy_configs = ProxyConfig.from_dict(proxies)

        self._auth_configs = auth.load_config(
            config_dict=self._general_configs, credstore_env=credstore_env,
        )
        self.credstore_env = credstore_env

        base_url = utils.parse_host(
            base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
        )
        # SSH has a different default for num_pools to all other adapters
        num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
            base_url.startswith('ssh://') else DEFAULT_NUM_POOLS

        if base_url.startswith('http+unix://'):
            self._custom_adapter = UnixHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self._unmount('http://', 'https://')
            # host part of URL should be unused, but is resolved by requests
            # module in proxy_bypass_macosx_sysconf()
            self.base_url = 'http+docker://localhost'
        elif base_url.startswith('npipe://'):
            if not IS_WINDOWS_PLATFORM:
                raise DockerException(
                    'The npipe:// protocol is only supported on Windows'
                )
            if PYWIN32_IMPORT_ERROR is not None:
                raise MissingRequirementException(
                    'Install pypiwin32 package to enable npipe:// support',
                    'pywin32',
                    PYWIN32_IMPORT_ERROR)
            self._custom_adapter = NpipeHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localnpipe'
        elif base_url.startswith('ssh://'):
            if PARAMIKO_IMPORT_ERROR is not None and not use_ssh_client:
                raise MissingRequirementException(
                    'Install paramiko package to enable ssh:// support',
                    'paramiko',
                    PARAMIKO_IMPORT_ERROR)
            self._custom_adapter = SSHHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size, shell_out=use_ssh_client
            )
            self.mount('http+docker://ssh', self._custom_adapter)
            self._unmount('http://', 'https://')
            self.base_url = 'http+docker://ssh'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = SSLHTTPAdapter(
                    pool_connections=num_pools)
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None or (isinstance(version, string_types) and version.lower() == 'auto'):
            self._version = self._retrieve_server_version()
        else:
            self._version = version
        if not isinstance(self._version, string_types):
            raise DockerException(
                'Version parameter must be a string or None. Found {0}'.format(
                    type(version).__name__
                )
            )
        if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
            raise InvalidVersion(
                'API versions below {0} are no longer supported by this '
                'library.'.format(MINIMUM_DOCKER_API_VERSION)
            )

    def _retrieve_server_version(self):
        try:
            version_result = self.version(api_version=False)
        except Exception as e:
            raise DockerException(
                'Error while fetching server API version: {0}'.format(e)
            )

        try:
            return version_result["ApiVersion"]
        except KeyError:
            raise DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            )
        except Exception as e:
            raise DockerException(
                'Error while fetching server API version: {0}. Response seems to be broken.'.format(e)
            )

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    @update_headers
    def _post(self, url, **kwargs):
        return self.post(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _get(self, url, **kwargs):
        return self.get(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _head(self, url, **kwargs):
        return self.head(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _put(self, url, **kwargs):
        return self.put(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _delete(self, url, **kwargs):
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, pathfmt, *args, **kwargs):
        for arg in args:
            if not isinstance(arg, string_types):
                raise ValueError(
                    'Expected a string but found {0} ({1}) '
                    'instead'.format(arg, type(arg))
                )

        quote_f = partial(quote, safe="/:")
        args = map(quote_f, args)

        if kwargs.get('versioned_api', True):
            return '{0}/v{1}{2}'.format(
                self.base_url, self._version, pathfmt.format(*args)
            )
        else:
            return '{0}{1}'.format(self.base_url, pathfmt.format(*args))

    def _raise_for_status(self, response):
        """Raises stored :class:`APIError`, if one occurred."""
        try:
            response.raise_for_status()
        except _HTTPError as e:
            raise_from(create_api_error_from_http_exception(e), e)

    def _result(self, response, json=False, binary=False):
        if json and binary:
            raise AssertionError('json and binary must not be both True')
        self._raise_for_status(response)

        if json:
            return response.json()
        if binary:
            return response.content
        return response.text

    def _post_json(self, url, data, **kwargs):
        # Go <1.1 cannot unserialize null to a string
        # so we do this disgusting thing here.
        data2 = {}
        if data is not None and isinstance(data, dict):
            for k, v in iteritems(data):
                if v is not None:
                    data2[k] = v
        elif data is not None:
            data2 = data

        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self._post(url, data=json.dumps(data2), **kwargs)

    def _attach_params(self, override=None):
        return override or {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }

    def _get_raw_response_socket(self, response):
        self._raise_for_status(response)
        if self.base_url == "http+docker://localnpipe":
            sock = response.raw._fp.fp.raw.sock
        elif self.base_url.startswith('http+docker://ssh'):
            sock = response.raw._fp.fp.channel
        elif PY3:
            sock = response.raw._fp.fp.raw
            if self.base_url.startswith("https://"):
                sock = sock._sock
        else:
            sock = response.raw._fp.fp._sock
        try:
            # Keep a reference to the response to stop it being garbage
            # collected. If the response is garbage collected, it will
            # close TLS sockets.
            sock._response = response
        except AttributeError:
            # UNIX sockets cannot have attributes set on them, but that's
            # fine because we will not be doing TLS over them
            pass

        return sock

    def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response."""

        if response.raw._fp.chunked:
            if decode:
                for chunk in json_stream.json_stream(self._stream_helper(response, False)):
                    yield chunk
            else:
                reader = response.raw
                while not reader.closed:
                    # this read call will block until we get a chunk
                    data = reader.read(1)
                    if not data:
                        break
                    if reader._fp.chunk_left:
                        data += reader.read(reader._fp.chunk_left)
                    yield data
        else:
            # Response is not chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response, json=decode)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered
        response."""
        buf = self._result(response, binary=True)
        buf_length = len(buf)
        walker = 0
        while True:
            if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
                break
            header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
            dummy, length = struct.unpack_from('>BxxxL', header)
            start = walker + STREAM_HEADER_SIZE_BYTES
            end = start + length
            walker = end
            yield buf[start:end]

    def _multiplexed_response_stream_helper(self, response):
        """A generator of multiplexed data blocks coming from a response
        stream."""

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        while True:
            header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
            if not header:
                break
            dummy, length = struct.unpack('>BxxxL', header)
            if not length:
                continue
            data = response.raw.read(length)
            if not data:
                break
            yield data

    def _stream_raw_result(self, response, chunk_size=1, decode=True):
        ''' Stream result for TTY-enabled container and raw binary data'''
        self._raise_for_status(response)

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        for out in response.iter_content(chunk_size, decode):
            yield out

    def _read_from_socket(self, response, stream, tty=True, demux=False):
        """Consume all data from the socket, close the response and return the
        data. If stream=True, then a generator is returned instead and the
        caller is responsible for closing the response.
        """
        socket = self._get_raw_response_socket(response)

        gen = frames_iter(socket, tty)

        if demux:
            # The generator will output tuples (stdout, stderr)
            gen = (demux_adaptor(*frame) for frame in gen)
        else:
            # The generator will output strings
            gen = (data for (dummy, data) in gen)

        if stream:
            return gen
        else:
            try:
                # Wait for all the frames, concatenate them, and return the result
                return consume_socket_output(gen, demux=demux)
            finally:
                response.close()
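
    # Sketch of the stream/demux combinations (illustrative): with demux=True
    # the generator yields (stdout, stderr) tuples, otherwise raw byte strings;
    # with stream=False the frames are concatenated first.
    #
    #     gen = client._read_from_socket(response, stream=True, tty=False, demux=True)
    #     for stdout, stderr in gen:
    #         if stdout:
    #             handle_stdout(stdout)   # handle_stdout/handle_stderr are hypothetical
    #         if stderr:
    #             handle_stderr(stderr)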

    def _disable_socket_timeout(self, socket):
        """ Depending on the combination of python version and whether we are
        connecting over http or https, we might need to access _sock, which
        may or may not exist; or we may need to just settimeout on socket
        itself, which also may or may not have settimeout on it. To avoid
        missing the correct one, we try both.

        We also do not want to set the timeout if it is already disabled, as
        you run the risk of changing a socket that was non-blocking to
        blocking, for example when using gevent.
        """
        sockets = [socket, getattr(socket, '_sock', None)]

        for s in sockets:
            if not hasattr(s, 'settimeout'):
                continue

            timeout = -1

            if hasattr(s, 'gettimeout'):
                timeout = s.gettimeout()

            # Do not change the timeout if it is already disabled.
            if timeout is None or timeout == 0.0:
                continue

            s.settimeout(None)

    @check_resource('container')
    def _check_is_tty(self, container):
        cont = self.inspect_container(container)
        return cont['Config']['Tty']

    def _get_result(self, container, stream, res):
        return self._get_result_tty(stream, res, self._check_is_tty(container))

    def _get_result_tty(self, stream, res, is_tty):
        # We should also use raw streaming (without keep-alive)
        # if we are dealing with a tty-enabled container.
        if is_tty:
            return self._stream_raw_result(res) if stream else \
                self._result(res, binary=True)

        self._raise_for_status(res)
        sep = binary_type()
        if stream:
            return self._multiplexed_response_stream_helper(res)
        else:
            return sep.join(
                list(self._multiplexed_buffer_helper(res))
            )

    def _unmount(self, *args):
        for proto in args:
            self.adapters.pop(proto)

    def get_adapter(self, url):
        try:
            return super(APIClient, self).get_adapter(url)
        except _InvalidSchema as e:
            if self._custom_adapter:
                return self._custom_adapter
            else:
                raise e

    @property
    def api_version(self):
        return self._version

    def reload_config(self, dockercfg_path=None):
        """
        Force a reload of the auth configuration

        Args:
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            None
        """
        self._auth_configs = auth.load_config(
            dockercfg_path, credstore_env=self.credstore_env
        )

    def _set_auth_headers(self, headers):
        log.debug('Looking for auth config')

        # If we do not have any auth data so far, try reloading the config
        # file one more time in case anything showed up in there.
        if not self._auth_configs or self._auth_configs.is_empty:
            log.debug("No auth config in memory - loading from filesystem")
            self._auth_configs = auth.load_config(
                credstore_env=self.credstore_env
            )

        # Send the full auth configuration (if any exists), since the build
        # could use any (or all) of the registries.
        if self._auth_configs:
            auth_data = self._auth_configs.get_all_credentials()

            # See https://github.com/docker/docker-py/issues/1683
            if (auth.INDEX_URL not in auth_data and
                    auth.INDEX_NAME in auth_data):
                auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})

            log.debug(
                'Sending auth config (%s)',
                ', '.join(repr(k) for k in auth_data.keys())
            )

            if auth_data:
                headers['X-Registry-Config'] = auth.encode_header(
                    auth_data
                )
        else:
            log.debug('No auth config found')

    def get_binary(self, pathfmt, *args, **kwargs):
        return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), binary=True)

    def get_json(self, pathfmt, *args, **kwargs):
        return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)

    def get_text(self, pathfmt, *args, **kwargs):
        return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs))

    def get_raw_stream(self, pathfmt, *args, **kwargs):
        chunk_size = kwargs.pop('chunk_size', DEFAULT_DATA_CHUNK_SIZE)
        res = self._get(self._url(pathfmt, *args, versioned_api=True), stream=True, **kwargs)
        self._raise_for_status(res)
        return self._stream_raw_result(res, chunk_size, False)

    def delete_call(self, pathfmt, *args, **kwargs):
        self._raise_for_status(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs))

    def delete_json(self, pathfmt, *args, **kwargs):
        return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)

    def post_call(self, pathfmt, *args, **kwargs):
        self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs))

    def post_json(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))

    def post_json_to_binary(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), binary=True)

    def post_json_to_json(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), json=True)

    def post_json_to_text(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        # Returns the response body as text, mirroring the sibling helpers;
        # the original body ended after popping 'data' and returned None.
        return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))

    def post_json_to_stream_socket(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        headers = (kwargs.pop('headers', None) or {}).copy()
        headers.update({
            'Connection': 'Upgrade',
            'Upgrade': 'tcp',
        })
        return self._get_raw_response_socket(
            self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs))

    def post_json_to_stream(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        headers = (kwargs.pop('headers', None) or {}).copy()
        headers.update({
            'Connection': 'Upgrade',
            'Upgrade': 'tcp',
        })
        stream = kwargs.pop('stream', False)
        demux = kwargs.pop('demux', False)
        tty = kwargs.pop('tty', False)
        return self._read_from_socket(
            self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs),
            stream,
            tty=tty,
            demux=demux
        )

    def post_to_json(self, pathfmt, *args, **kwargs):
        return self._result(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)

@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from .. import auth
from ..utils.decorators import minimum_version


class DaemonApiMixin(object):
    @minimum_version('1.25')
    def df(self):
        """
        Get data usage information.

        Returns:
            (dict): A dictionary representing different resource categories
            and their respective data usage.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        url = self._url('/system/df')
        return self._result(self._get(url), True)
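
    # Illustrative sketch (assumes a reachable daemon; not part of the mixin):
    #
    #     client = APIClient(base_url='unix://var/run/docker.sock')
    #     usage = client.df()
    #     print(usage['LayersSize'])   # 'LayersSize' is one key the API returns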

    def info(self):
        """
        Display system-wide information. Identical to the ``docker info``
        command.

        Returns:
            (dict): The info as a dict

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self._result(self._get(self._url("/info")), True)

    def login(self, username, password=None, email=None, registry=None,
              reauth=False, dockercfg_path=None):
        """
        Authenticate with a registry. Similar to the ``docker login`` command.

        Args:
            username (str): The registry username
            password (str): The plaintext password
            email (str): The email for the registry account
            registry (str): URL to the registry. E.g.
                ``https://index.docker.io/v1/``
            reauth (bool): Whether or not to refresh existing authentication on
                the Docker server.
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            (dict): The response from the login request

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """

        # If we do not have any auth data so far, try reloading the config file
        # one more time in case anything showed up in there.
        # If dockercfg_path is passed check to see if the config file exists,
        # if so load that config.
        if dockercfg_path and os.path.exists(dockercfg_path):
            self._auth_configs = auth.load_config(
                dockercfg_path, credstore_env=self.credstore_env
            )
        elif not self._auth_configs or self._auth_configs.is_empty:
            self._auth_configs = auth.load_config(
                credstore_env=self.credstore_env
            )

        authcfg = self._auth_configs.resolve_authconfig(registry)
        # If we found an existing auth config for this registry and username
        # combination, we can return it immediately unless reauth is requested.
        if authcfg and authcfg.get('username', None) == username \
                and not reauth:
            return authcfg

        req_data = {
            'username': username,
            'password': password,
            'email': email,
            'serveraddress': registry,
        }

        response = self._post_json(self._url('/auth'), data=req_data)
        if response.status_code == 200:
            self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
        return self._result(response, json=True)
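
    # Illustrative sketch (credentials and registry URL are hypothetical):
    #
    #     client.login(username='alice', password='s3cret',
    #                  registry='https://index.docker.io/v1/')
    #     # returns the cached auth config if one already matches; otherwise
    #     # POSTs to /auth and caches the result on success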

    def ping(self):
        """
        Checks that the server is responsive. An exception will be raised
        if it is not responding.

        Returns:
            (bool) The response from the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self._result(self._get(self._url('/_ping'))) == 'OK'

    def version(self, api_version=True):
        """
        Returns version information from the server. Similar to the ``docker
        version`` command.

        Returns:
            (dict): The server version information

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        url = self._url("/version", versioned_api=api_version)
        return self._result(self._get(url), json=True)
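
    # Illustrative sketch of the two probes above (assumes a running daemon):
    #
    #     if client.ping():                # True when the daemon answers 'OK'
    #         info = client.version()     # dict with 'ApiVersion', 'Version', ...
    #         print(info['ApiVersion'])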

@@ -0,0 +1,388 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import base64
import json
import logging

from ansible.module_utils.six import iteritems, string_types

from . import errors
from .credentials.store import Store
from .credentials.errors import StoreError, CredentialsNotFound
from .utils import config

INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>'

log = logging.getLogger(__name__)


def resolve_repository_name(repo_name):
    if '://' in repo_name:
        raise errors.InvalidRepository(
            'Repository name cannot contain a scheme ({0})'.format(repo_name)
        )

    index_name, remote_name = split_repo_name(repo_name)
    if index_name[0] == '-' or index_name[-1] == '-':
        raise errors.InvalidRepository(
            'Invalid index name ({0}). Cannot begin or end with a'
            ' hyphen.'.format(index_name)
        )
    return resolve_index_name(index_name), remote_name


def resolve_index_name(index_name):
    index_name = convert_to_hostname(index_name)
    if index_name == 'index.' + INDEX_NAME:
        index_name = INDEX_NAME
    return index_name


def get_config_header(client, registry):
    log.debug('Looking for auth config')
    if not client._auth_configs or client._auth_configs.is_empty:
        log.debug(
            "No auth config in memory - loading from filesystem"
        )
        client._auth_configs = load_config(credstore_env=client.credstore_env)
    authcfg = resolve_authconfig(
        client._auth_configs, registry, credstore_env=client.credstore_env
    )
    # Do not fail here if no authentication exists for this
    # specific registry as we can have a readonly pull. Just
    # put the header if we can.
    if authcfg:
        log.debug('Found auth config')
        # auth_config needs to be a dict in the format used by
        # auth.py: username, password, serveraddress, email
        return encode_header(authcfg)
    log.debug('No auth config found')
    return None


def split_repo_name(repo_name):
    parts = repo_name.split('/', 1)
    if len(parts) == 1 or (
        '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
    ):
        # This is a docker index repo (ex: username/foobar or ubuntu)
        return INDEX_NAME, repo_name
    return tuple(parts)
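
# Illustrative examples of the name-splitting rules above (values shown are
# what the logic produces, not API calls):
#
#     split_repo_name('ubuntu')                       # ('docker.io', 'ubuntu')
#     split_repo_name('user/repo')                    # ('docker.io', 'user/repo')
#     split_repo_name('registry.example.com/repo')    # ('registry.example.com', 'repo')
#     resolve_repository_name('localhost:5000/repo')  # ('localhost:5000', 'repo')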


def get_credential_store(authconfig, registry):
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig)
    return authconfig.get_credential_store(registry)


class AuthConfig(dict):
    def __init__(self, dct, credstore_env=None):
        if 'auths' not in dct:
            dct['auths'] = {}
        self.update(dct)
        self._credstore_env = credstore_env
        self._stores = {}

    @classmethod
    def parse_auth(cls, entries, raise_on_error=False):
        """
        Parses authentication entries

        Args:
            entries: Dict of authentication entries.
            raise_on_error: If set to true, an invalid format will raise
                InvalidConfigFile

        Returns:
            Authentication registry.
        """

        conf = {}
        for registry, entry in iteritems(entries):
            if not isinstance(entry, dict):
                log.debug('Config entry for key %s is not auth config', registry)
                # We sometimes fall back to parsing the whole config as if it
                # was the auth config by itself, for legacy purposes. In that
                # case, we fail silently and return an empty conf if any of the
                # keys is not formatted properly.
                if raise_on_error:
                    raise errors.InvalidConfigFile(
                        'Invalid configuration for registry {0}'.format(
                            registry
                        )
                    )
                return {}
            if 'identitytoken' in entry:
                log.debug('Found an IdentityToken entry for registry %s', registry)
                conf[registry] = {
                    'IdentityToken': entry['identitytoken']
                }
                continue  # Other values are irrelevant if we have a token

            if 'auth' not in entry:
                # Starting with engine v1.11 (API 1.23), an empty dictionary is
                # a valid value in the auths config.
                # https://github.com/docker/compose/issues/3265
                log.debug('Auth data for %s is absent. Client might be using a credentials store instead.', registry)
                conf[registry] = {}
                continue

            username, password = decode_auth(entry['auth'])
            log.debug('Found entry (registry=%s, username=%s)', repr(registry), repr(username))

            conf[registry] = {
                'username': username,
                'password': password,
                'email': entry.get('email'),
                'serveraddress': registry,
            }
        return conf
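
    # Illustrative input/output for parse_auth (the base64 value decodes to
    # 'user:pass'; the registry name is hypothetical):
    #
    #     AuthConfig.parse_auth({'registry.example.com': {'auth': 'dXNlcjpwYXNz'}})
    #     # -> {'registry.example.com': {'username': 'user', 'password': 'pass',
    #     #     'email': None, 'serveraddress': 'registry.example.com'}}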

    @classmethod
    def load_config(cls, config_path, config_dict, credstore_env=None):
        """
        Loads authentication data from a Docker configuration file in the given
        root directory or if config_path is passed use given path.
        Lookup priority:
            explicit config_path parameter > DOCKER_CONFIG environment
            variable > ~/.docker/config.json > ~/.dockercfg
        """

        if not config_dict:
            config_file = config.find_config_file(config_path)

            if not config_file:
                return cls({}, credstore_env)
            try:
                with open(config_file) as f:
                    config_dict = json.load(f)
            except (IOError, KeyError, ValueError) as e:
                # Likely missing new Docker config file or it is in an
                # unknown format, continue to attempt to read old location
                # and format.
                log.debug(e)
                return cls(_load_legacy_config(config_file), credstore_env)

        res = {}
        if config_dict.get('auths'):
            log.debug("Found 'auths' section")
            res.update({
                'auths': cls.parse_auth(
                    config_dict.pop('auths'), raise_on_error=True
                )
            })
        if config_dict.get('credsStore'):
            log.debug("Found 'credsStore' section")
            res.update({'credsStore': config_dict.pop('credsStore')})
        if config_dict.get('credHelpers'):
            log.debug("Found 'credHelpers' section")
            res.update({'credHelpers': config_dict.pop('credHelpers')})
        if res:
            return cls(res, credstore_env)

        log.debug(
            "Could not find auth-related section; attempting to interpret "
            "as auth-only file"
        )
        return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)

    @property
    def auths(self):
        return self.get('auths', {})

    @property
    def creds_store(self):
        return self.get('credsStore', None)

    @property
    def cred_helpers(self):
        return self.get('credHelpers', {})

    @property
    def is_empty(self):
        return (
            not self.auths and not self.creds_store and not self.cred_helpers
        )

    def resolve_authconfig(self, registry=None):
        """
        Returns the authentication data from the given auth configuration for a
        specific registry. As with the Docker client, legacy entries in the
        config with full URLs are stripped down to hostnames before checking
        for a match. Returns None if no match was found.
        """

        if self.creds_store or self.cred_helpers:
            store_name = self.get_credential_store(registry)
            if store_name is not None:
                log.debug('Using credentials store "%s"', store_name)
                cfg = self._resolve_authconfig_credstore(registry, store_name)
                if cfg is not None:
                    return cfg
                log.debug('No entry in credstore - fetching from auth dict')

        # Default to the public index server
        registry = resolve_index_name(registry) if registry else INDEX_NAME
        log.debug("Looking for auth entry for %s", repr(registry))

        if registry in self.auths:
            log.debug("Found %s", repr(registry))
            return self.auths[registry]

        for key, conf in iteritems(self.auths):
            if resolve_index_name(key) == registry:
                log.debug("Found %s", repr(key))
                return conf

        log.debug("No entry found")
        return None

    def _resolve_authconfig_credstore(self, registry, credstore_name):
        if not registry or registry == INDEX_NAME:
            # The ecosystem is a little schizophrenic with index.docker.io VS
            # docker.io - in that case, it seems the full URL is necessary.
            registry = INDEX_URL
        log.debug("Looking for auth entry for %s", repr(registry))
        store = self._get_store_instance(credstore_name)
        try:
            data = store.get(registry)
            res = {
                'ServerAddress': registry,
            }
            if data['Username'] == TOKEN_USERNAME:
                res['IdentityToken'] = data['Secret']
            else:
                res.update({
                    'Username': data['Username'],
                    'Password': data['Secret'],
                })
            return res
        except CredentialsNotFound:
            log.debug('No entry found')
            return None
        except StoreError as e:
            raise errors.DockerException(
                'Credentials store error: {0}'.format(repr(e))
            )

    def _get_store_instance(self, name):
        if name not in self._stores:
            self._stores[name] = Store(
                name, environment=self._credstore_env
            )
        return self._stores[name]

    def get_credential_store(self, registry):
        if not registry or registry == INDEX_NAME:
            registry = INDEX_URL

        return self.cred_helpers.get(registry) or self.creds_store

    def get_all_credentials(self):
        auth_data = self.auths.copy()
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list().keys():
                auth_data[k] = self._resolve_authconfig_credstore(
                    k, self.creds_store
                )
                auth_data[convert_to_hostname(k)] = auth_data[k]

        # credHelpers entries take priority over all others
        for reg, store_name in self.cred_helpers.items():
            auth_data[reg] = self._resolve_authconfig_credstore(
                reg, store_name
            )
            auth_data[convert_to_hostname(reg)] = auth_data[reg]

        return auth_data

    def add_auth(self, reg, data):
        self['auths'][reg] = data


def resolve_authconfig(authconfig, registry=None, credstore_env=None):
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig, credstore_env)
    return authconfig.resolve_authconfig(registry)


def convert_to_hostname(url):
    return url.replace('http://', '').replace('https://', '').split('/', 1)[0]


def decode_auth(auth):
    if isinstance(auth, string_types):
        auth = auth.encode('ascii')
    s = base64.b64decode(auth)
    login, pwd = s.split(b':', 1)
    return login.decode('utf8'), pwd.decode('utf8')


def encode_header(auth):
    auth_json = json.dumps(auth).encode('ascii')
    return base64.urlsafe_b64encode(auth_json)
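
# Round-trip sketch for the two helpers above:
#
#     decode_auth('dXNlcjpwYXNz')            # -> ('user', 'pass')
#     encode_header({'username': 'user'})    # -> URL-safe base64 of the JSON blob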


def parse_auth(entries, raise_on_error=False):
    """
    Parses authentication entries

    Args:
        entries: Dict of authentication entries.
        raise_on_error: If set to true, an invalid format will raise
            InvalidConfigFile

    Returns:
        Authentication registry.
    """

    return AuthConfig.parse_auth(entries, raise_on_error)


def load_config(config_path=None, config_dict=None, credstore_env=None):
    return AuthConfig.load_config(config_path, config_dict, credstore_env)


def _load_legacy_config(config_file):
    log.debug("Attempting to parse legacy auth file format")
    try:
        data = []
        with open(config_file) as f:
            for line in f.readlines():
                data.append(line.strip().split(' = ')[1])
            if len(data) < 2:
                # Not enough data
                raise errors.InvalidConfigFile(
                    'Invalid or empty configuration file!'
                )

        username, password = decode_auth(data[0])
        return {'auths': {
            INDEX_NAME: {
                'username': username,
                'password': password,
                'email': data[1],
                'serveraddress': INDEX_URL,
            }
        }}
    except Exception as e:
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys

MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
    'memory', 'memswap', 'cpushares', 'cpusetcpus'
]

DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'

BYTE_UNITS = {
    'b': 1,
    'k': 1024,
    'm': 1024 * 1024,
    'g': 1024 * 1024 * 1024
}

IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25

# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9

DEFAULT_MAX_POOL_SIZE = 10

DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048

@@ -0,0 +1,242 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import os

from ansible.module_utils.six import raise_from

from .. import errors

from .config import (
    METAFILE,
    get_current_context_name,
    get_meta_dir,
    write_context_name_to_docker_config,
)
from .context import Context


def create_default_context():
    host = None
    if os.environ.get('DOCKER_HOST'):
        host = os.environ.get('DOCKER_HOST')
    return Context("default", "swarm", host, description="Current DOCKER_HOST based configuration")


class ContextAPI(object):
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
    """
    DEFAULT_CONTEXT = None

    @classmethod
    def get_default_context(cls):
        context = cls.DEFAULT_CONTEXT
        if context is None:
            context = create_default_context()
            cls.DEFAULT_CONTEXT = context
        return context

    @classmethod
    def create_context(
            cls, name, orchestrator=None, host=None, tls_cfg=None,
            default_namespace=None, skip_tls_verify=False):
        """Creates a new context.
        Returns:
            (Context): a Context object.
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextAlreadyExists`
                If a context with the name already exists.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.create_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException(
                '"default" is a reserved context name')
        ctx = Context.load_context(name)
        if ctx:
            raise errors.ContextAlreadyExists(name)
        endpoint = "docker"
        if orchestrator and orchestrator != "swarm":
            endpoint = orchestrator
        ctx = Context(name, orchestrator)
        ctx.set_endpoint(
            endpoint, host, tls_cfg,
            skip_tls_verify=skip_tls_verify,
            def_namespace=default_namespace)
        ctx.save()
        return ctx

    @classmethod
    def get_context(cls, name=None):
        """Retrieves a context object.
        Args:
            name (str): The name of the context

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.get_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            name = get_current_context_name()
        if name == "default":
            return cls.get_default_context()
        return Context.load_context(name)

    @classmethod
    def contexts(cls):
        """Context list.
        Returns:
            (Context): List of context objects.
        Raises:
            :py:class:`docker.errors.APIError`
                If something goes wrong.
        """
        names = []
        for dirname, dummy, fnames in os.walk(get_meta_dir()):
            for filename in fnames:
                if filename == METAFILE:
                    filepath = os.path.join(dirname, filename)
                    try:
                        with open(filepath, "r") as f:
                            data = json.load(f)
                            name = data["Name"]
                            if name == "default":
                                raise ValueError('"default" is a reserved context name')
                            names.append(name)
                    except Exception as e:
                        raise_from(errors.ContextException(
                            "Failed to load metafile {filepath}: {e}".format(filepath=filepath, e=e),
                        ), e)

        contexts = [cls.get_default_context()]
        for name in names:
            context = Context.load_context(name)
            if not context:
                raise errors.ContextException("Context {context} cannot be found".format(context=name))
            contexts.append(context)
        return contexts

    @classmethod
    def get_current_context(cls):
        """Get current context.
        Returns:
            (Context): current context object.
        """
        return cls.get_context()

    @classmethod
    def set_current_context(cls, name="default"):
        ctx = cls.get_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        err = write_context_name_to_docker_config(name)
        if err:
            raise errors.ContextException(
                'Failed to set current context: {err}'.format(err=err))

    @classmethod
    def remove_context(cls, name):
        """Remove a context. Similar to the ``docker context rm`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.remove_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException(
                'context "default" cannot be removed')
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)
        if name == get_current_context_name():
            write_context_name_to_docker_config(None)
        ctx.remove()

    @classmethod
    def inspect_context(cls, name="default"):
        """Inspect a context. Similar to the ``docker context inspect`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.inspect_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            return cls.get_default_context()()
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        return ctx()

@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import hashlib
import json
import os

from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from ..utils.config import find_config_file, get_default_config_file
from ..utils.utils import parse_host

METAFILE = "meta.json"


def get_current_context_name_with_source():
    if os.environ.get('DOCKER_HOST'):
        return "default", "DOCKER_HOST environment variable set"
    if os.environ.get('DOCKER_CONTEXT'):
        return os.environ['DOCKER_CONTEXT'], "DOCKER_CONTEXT environment variable set"
    docker_cfg_path = find_config_file()
    if docker_cfg_path:
        try:
            with open(docker_cfg_path) as f:
                return json.load(f).get("currentContext", "default"), "configuration file {file}".format(file=docker_cfg_path)
        except Exception:
            pass
    return "default", "fallback value"
|
||||
|
||||
|
||||
def get_current_context_name():
|
||||
return get_current_context_name_with_source()[0]
|
||||
|
||||
|
||||
def write_context_name_to_docker_config(name=None):
|
||||
if name == 'default':
|
||||
name = None
|
||||
docker_cfg_path = find_config_file()
|
||||
config = {}
|
||||
if docker_cfg_path:
|
||||
try:
|
||||
with open(docker_cfg_path) as f:
|
||||
config = json.load(f)
|
||||
except Exception as e:
|
||||
return e
|
||||
current_context = config.get("currentContext", None)
|
||||
if current_context and not name:
|
||||
del config["currentContext"]
|
||||
elif name:
|
||||
config["currentContext"] = name
|
||||
else:
|
||||
return
|
||||
if not docker_cfg_path:
|
||||
docker_cfg_path = get_default_config_file()
|
||||
try:
|
||||
with open(docker_cfg_path, "w") as f:
|
||||
json.dump(config, f, indent=4)
|
||||
except Exception as e:
|
||||
return e
|
||||
|
||||
|
||||
def get_context_id(name):
|
||||
return hashlib.sha256(name.encode('utf-8')).hexdigest()
|
||||
|
||||
|
||||
def get_context_dir():
|
||||
docker_cfg_path = find_config_file() or get_default_config_file()
|
||||
return os.path.join(os.path.dirname(docker_cfg_path), "contexts")
|
||||
|
||||
|
||||
def get_meta_dir(name=None):
|
||||
meta_dir = os.path.join(get_context_dir(), "meta")
|
||||
if name:
|
||||
return os.path.join(meta_dir, get_context_id(name))
|
||||
return meta_dir
|
||||
|
||||
|
||||
def get_meta_file(name):
|
||||
return os.path.join(get_meta_dir(name), METAFILE)
|
||||
|
||||
|
||||
def get_tls_dir(name=None, endpoint=""):
|
||||
context_dir = get_context_dir()
|
||||
if name:
|
||||
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
|
||||
return os.path.join(context_dir, "tls")
|
||||
|
||||
|
||||
def get_context_host(path=None, tls=False):
|
||||
host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
|
||||
if host == DEFAULT_UNIX_SOCKET:
|
||||
# remove http+ from default docker socket url
|
||||
if host.startswith("http+"):
|
||||
host = host[5:]
|
||||
return host
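
# A minimal usage sketch for the helpers above (added for illustration, not
# part of the original file). The import prefix assumes the vendored package
# root `ansible_collections.community.docker.plugins.module_utils._api`;
# adjust it to wherever this package actually lives in your tree.

from ansible_collections.community.docker.plugins.module_utils._api.context.config import (
    get_current_context_name_with_source,
    get_meta_file,
)

name, source = get_current_context_name_with_source()
print(name, source)        # e.g. "default" / "fallback value"
# Metadata for a context is stored under
# <config dir>/contexts/meta/<sha256(name)>/meta.json:
print(get_meta_file(name))
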
@ -0,0 +1,268 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import os
from shutil import copyfile, rmtree

from ansible.module_utils.six import raise_from

from ..errors import ContextException
from ..tls import TLSConfig

from .config import (
    get_context_host,
    get_meta_dir,
    get_meta_file,
    get_tls_dir,
)


IN_MEMORY = "IN MEMORY"


class Context(object):
    """A context."""

    def __init__(self, name, orchestrator=None, host=None, endpoints=None,
                 skip_tls_verify=False, tls=False, description=None):
        if not name:
            raise Exception("Name not provided")
        self.name = name
        self.context_type = None
        self.orchestrator = orchestrator
        self.endpoints = {}
        self.tls_cfg = {}
        self.meta_path = IN_MEMORY
        self.tls_path = IN_MEMORY
        self.description = description

        if not endpoints:
            # set default docker endpoint if no endpoint is set
            default_endpoint = "docker" if (
                not orchestrator or orchestrator == "swarm"
            ) else orchestrator

            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host, skip_tls_verify or tls),
                    "SkipTLSVerify": skip_tls_verify,
                }
            }
            return

        # check docker endpoints
        for k, v in endpoints.items():
            if not isinstance(v, dict):
                # unknown format
                raise ContextException(
                    "Unknown endpoint format for context {name}: {v}".format(name=name, v=v),
                )

            self.endpoints[k] = v
            if k != "docker":
                continue

            self.endpoints[k]["Host"] = v.get("Host", get_context_host(
                host, skip_tls_verify or tls))
            self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
                "SkipTLSVerify", skip_tls_verify))

    def set_endpoint(
            self, name="docker", host=None, tls_cfg=None,
            skip_tls_verify=False, def_namespace=None):
        self.endpoints[name] = {
            "Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
            "SkipTLSVerify": skip_tls_verify
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace

        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self):
        return self.__call__()

    @classmethod
    def load_context(cls, name):
        meta = Context._load_meta(name)
        if meta:
            instance = cls(
                meta["Name"],
                orchestrator=meta["Metadata"].get("StackOrchestrator", None),
                endpoints=meta.get("Endpoints", None),
                description=meta["Metadata"].get('Description'))
            instance.context_type = meta["Metadata"].get("Type", None)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name):
        meta_file = get_meta_file(name)
        if not os.path.isfile(meta_file):
            return None

        metadata = {}
        try:
            with open(meta_file) as f:
                metadata = json.load(f)
        except (OSError, KeyError, ValueError) as e:
            # unknown format
            raise_from(Exception(
                "Detected corrupted meta file for context {name} : {e}".format(name=name, e=e)
            ), e)

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields
        for k, v in metadata["Endpoints"].items():
            if k != "docker":
                continue
            metadata["Endpoints"][k]["Host"] = v.get(
                "Host", get_context_host(None, False))
            metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", True))

        return metadata

    def _load_certs(self):
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints.keys():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([cert, key]) or ca_cert:
                verify = None
                if endpoint == "docker" and not self.endpoints["docker"].get(
                        "SkipTLSVerify", False):
                    verify = True
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key) if cert and key else None, ca_cert=ca_cert, verify=verify)
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self):
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "w") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))

            ca_file = tls.ca_cert
            if ca_file:
                copyfile(ca_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(ca_file)))

            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(cert_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(cert_file)))
                copyfile(key_file, os.path.join(
                    tls_dir, endpoint, os.path.basename(key_file)))

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self):
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self):
        return "<{classname}: '{name}'>".format(classname=self.__class__.__name__, name=self.name)

    def __str__(self):
        return json.dumps(self.__call__(), indent=2)

    def __call__(self):
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    def is_docker_host(self):
        return self.context_type is None

    @property
    def Name(self):
        return self.name

    @property
    def Host(self):
        if not self.orchestrator or self.orchestrator == "swarm":
            endpoint = self.endpoints.get("docker", None)
            if endpoint:
                return endpoint.get("Host", None)
            return None

        return self.endpoints[self.orchestrator].get("Host", None)

    @property
    def Orchestrator(self):
        return self.orchestrator

    @property
    def Metadata(self):
        meta = {}
        if self.orchestrator:
            meta = {"StackOrchestrator": self.orchestrator}
        return {
            "Name": self.name,
            "Metadata": meta,
            "Endpoints": self.endpoints
        }

    @property
    def TLSConfig(self):
        key = self.orchestrator
        if not key or key == "swarm":
            key = "docker"
        if key in self.tls_cfg.keys():
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self):
        certs = {}
        for endpoint, tls in self.tls_cfg.items():
            cert, key = tls.cert
            certs[endpoint] = list(
                map(os.path.basename, [tls.ca_cert, cert, key]))
        return {
            "TLSMaterial": certs
        }

    @property
    def Storage(self):
        return {
            "Storage": {
                "MetadataPath": self.meta_path,
                "TLSPath": self.tls_path
            }}
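
# A minimal usage sketch for Context (added for illustration; the host and
# context name are examples, and the import prefix again assumes the vendored
# package root `..._api`):

from ansible_collections.community.docker.plugins.module_utils._api.context.context import Context

ctx = Context("staging", host="tcp://10.0.0.5:2376")
ctx.save()                         # persists Metadata under contexts/meta/<id>/meta.json

loaded = Context.load_context("staging")
print(loaded.Host)                 # the docker endpoint host
print(loaded)                      # JSON dump of Metadata + TLSMaterial + Storage
loaded.remove()                    # deletes the saved metadata and TLS material
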
@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

PROGRAM_PREFIX = 'docker-credential-'
DEFAULT_LINUX_STORE = 'secretservice'
DEFAULT_OSX_STORE = 'osxkeychain'
DEFAULT_WIN32_STORE = 'wincred'
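
# Illustration (not from the original file): these constants compose into the
# credential-helper executable names that the credential Store class (later in
# this commit) looks up on PATH.
for suffix in (DEFAULT_LINUX_STORE, DEFAULT_OSX_STORE, DEFAULT_WIN32_STORE):
    print(PROGRAM_PREFIX + suffix)
# -> docker-credential-secretservice, docker-credential-osxkeychain, docker-credential-wincred
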
@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class StoreError(RuntimeError):
    pass


class CredentialsNotFound(StoreError):
    pass


class InitializationError(StoreError):
    pass


def process_store_error(cpe, program):
    message = cpe.output.decode('utf-8')
    if 'credentials not found in native keychain' in message:
        return CredentialsNotFound(
            'No matching credentials in {0}'.format(
                program
            )
        )
    return StoreError(
        'Credentials store {0} exited with "{1}".'.format(
            program, cpe.output.decode('utf-8').strip()
        )
    )
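
# A minimal sketch (Python 3, illustrative helper name) of the intended use of
# process_store_error(); this mirrors how the Store class invokes a helper and
# translates its failure into CredentialsNotFound or StoreError:

import subprocess

program = 'docker-credential-secretservice'
try:
    subprocess.check_output([program, 'get'], input=b'https://index.docker.io/v1/')
except subprocess.CalledProcessError as cpe:
    raise process_store_error(cpe, program)
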
@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import errno
import json
import subprocess

from ansible.module_utils.six import PY3, binary_type

from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable


class Store(object):
    def __init__(self, program, environment=None):
        """ Create a store object that acts as an interface to
            perform the basic operations for storing, retrieving
            and erasing credentials using `program`.
        """
        self.program = constants.PROGRAM_PREFIX + program
        self.exe = find_executable(self.program)
        self.environment = environment
        if self.exe is None:
            raise errors.InitializationError(
                '{0} not installed or not available in PATH'.format(
                    self.program
                )
            )

    def get(self, server):
        """ Retrieve credentials for `server`. If no credentials are found,
            a `StoreError` will be raised.
        """
        if not isinstance(server, binary_type):
            server = server.encode('utf-8')
        data = self._execute('get', server)
        result = json.loads(data.decode('utf-8'))

        # docker-credential-pass will return an object for nonexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
                'No matching credentials in {0}'.format(self.program)
            )

        return result

    def store(self, server, username, secret):
        """ Store credentials for `server`. Raises a `StoreError` if an error
            occurs.
        """
        data_input = json.dumps({
            'ServerURL': server,
            'Username': username,
            'Secret': secret
        }).encode('utf-8')
        return self._execute('store', data_input)

    def erase(self, server):
        """ Erase credentials for `server`. Raises a `StoreError` if an error
            occurs.
        """
        if not isinstance(server, binary_type):
            server = server.encode('utf-8')
        self._execute('erase', server)

    def list(self):
        """ List stored credentials. Requires v0.4.0+ of the helper.
        """
        data = self._execute('list', None)
        return json.loads(data.decode('utf-8'))

    def _execute(self, subcmd, data_input):
        output = None
        env = create_environment_dict(self.environment)
        try:
            if PY3:
                output = subprocess.check_output(
                    [self.exe, subcmd], input=data_input, env=env,
                )
            else:
                process = subprocess.Popen(
                    [self.exe, subcmd], stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, env=env,
                )
                output, dummy = process.communicate(data_input)
                if process.returncode != 0:
                    raise subprocess.CalledProcessError(
                        returncode=process.returncode, cmd='', output=output
                    )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program)
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    '{0} not installed or not available in PATH'.format(
                        self.program
                    )
                )
            else:
                raise errors.StoreError(
                    'Unexpected OS error "{0}", errno={1}'.format(
                        e.strerror, e.errno
                    )
                )
        return output
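
# A minimal usage sketch for Store (illustrative values; assumes the
# 'docker-credential-secretservice' helper is installed). Pass the helper name
# without the 'docker-credential-' prefix, which the constructor adds:

store = Store('secretservice')
store.store(server='https://index.docker.io/v1/', username='alice', secret='hunter2')
creds = store.get('https://index.docker.io/v1/')   # typically {'ServerURL': ..., 'Username': ..., 'Secret': ...}
store.erase('https://index.docker.io/v1/')
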
@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import sys

from ansible.module_utils.six import PY2

if PY2:
    from distutils.spawn import find_executable as which
else:
    from shutil import which


def find_executable(executable, path=None):
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    if not PY2:
        # shutil.which() already uses PATHEXT on Windows, so on
        # Python 3 we can simply use shutil.which() in all cases.
        # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
        return which(executable, path=path)

    if sys.platform != 'win32':
        return which(executable, path)

    if path is None:
        path = os.environ['PATH']

    paths = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
    base, ext = os.path.splitext(executable)

    if not os.path.isfile(executable):
        for p in paths:
            for ext in extensions:
                f = os.path.join(p, base + ext)
                if os.path.isfile(f):
                    return f
        return None
    else:
        return executable


def create_environment_dict(overrides):
    """
    Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
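
# Quick illustration of the two helpers above: find_executable() resolves a
# program on PATH (expanding PATHEXT on Windows under Python 2), and
# create_environment_dict() copies os.environ with overrides applied.

print(find_executable('ssh'))                  # e.g. '/usr/bin/ssh', or None if absent
env = create_environment_dict({'LANG': 'C'})   # copy of os.environ with LANG set to 'C'
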
@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ._import_helper import HTTPError as _HTTPError

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import raise_from


class DockerException(Exception):
    """
    A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """


def create_api_error_from_http_exception(e):
    """
    Create a suitable APIError from requests.exceptions.HTTPError.
    """
    response = e.response
    try:
        explanation = response.json()['message']
    except ValueError:
        explanation = to_native((response.content or '').strip())
    cls = APIError
    if response.status_code == 404:
        if explanation and ('No such image' in str(explanation) or
                            'not found: does not exist or no pull access'
                            in str(explanation) or
                            'repository does not exist' in str(explanation)):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise_from(cls(e, response=response, explanation=explanation), e)


class APIError(_HTTPError, DockerException):
    """
    An HTTP error from the API.
    """
    def __init__(self, message, response=None, explanation=None):
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 does not
        super(APIError, self).__init__(message)
        self.response = response
        self.explanation = explanation

    def __str__(self):
        message = super(APIError, self).__str__()

        if self.is_client_error():
            message = '{0} Client Error for {1}: {2}'.format(
                self.response.status_code, self.response.url,
                self.response.reason)

        elif self.is_server_error():
            message = '{0} Server Error for {1}: {2}'.format(
                self.response.status_code, self.response.url,
                self.response.reason)

        if self.explanation:
            message = '{0} ("{1}")'.format(message, self.explanation)

        return message

    @property
    def status_code(self):
        if self.response is not None:
            return self.response.status_code

    def is_error(self):
        return self.is_client_error() or self.is_server_error()

    def is_client_error(self):
        if self.status_code is None:
            return False
        return 400 <= self.status_code < 500

    def is_server_error(self):
        if self.status_code is None:
            return False
        return 500 <= self.status_code < 600


class NotFound(APIError):
    pass


class ImageNotFound(NotFound):
    pass


class InvalidVersion(DockerException):
    pass


class InvalidRepository(DockerException):
    pass


class InvalidConfigFile(DockerException):
    pass


class InvalidArgument(DockerException):
    pass


class DeprecatedMethod(DockerException):
    pass


class TLSParameterError(DockerException):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg + (". TLS configurations should map the Docker CLI "
                           "client configurations. See "
                           "https://docs.docker.com/engine/articles/https/ "
                           "for API details.")


class NullResource(DockerException, ValueError):
    pass


class ContainerError(DockerException):
    """
    Represents a container that has exited with a non-zero exit code.
    """
    def __init__(self, container, exit_status, command, image, stderr):
        self.container = container
        self.exit_status = exit_status
        self.command = command
        self.image = image
        self.stderr = stderr

        err = ": {0}".format(stderr) if stderr is not None else ""
        msg = ("Command '{0}' in image '{1}' returned non-zero exit "
               "status {2}{3}").format(command, image, exit_status, err)

        super(ContainerError, self).__init__(msg)


class StreamParseError(RuntimeError):
    def __init__(self, reason):
        self.msg = reason


class BuildError(DockerException):
    def __init__(self, reason, build_log):
        super(BuildError, self).__init__(reason)
        self.msg = reason
        self.build_log = build_log


class ImageLoadError(DockerException):
    pass


def create_unexpected_kwargs_error(name, kwargs):
    quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)]
    text = ["{0}() ".format(name)]
    if len(quoted_kwargs) == 1:
        text.append("got an unexpected keyword argument ")
    else:
        text.append("got unexpected keyword arguments ")
    text.append(', '.join(quoted_kwargs))
    return TypeError(''.join(text))


class MissingContextParameter(DockerException):
    def __init__(self, param):
        self.param = param

    def __str__(self):
        return ("missing parameter: {0}".format(self.param))


class ContextAlreadyExists(DockerException):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return ("context {0} already exists".format(self.name))


class ContextException(DockerException):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return (self.msg)


class ContextNotFound(DockerException):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return ("context '{0}' not found".format(self.name))


class MissingRequirementException(DockerException):
    def __init__(self, msg, requirement, import_exception):
        self.msg = msg
        self.requirement = requirement
        self.import_exception = import_exception

    def __str__(self):
        return (self.msg)
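
# A minimal sketch of how this hierarchy is meant to be consumed: catching
# DockerException covers everything raised here, while APIError adds
# HTTP-status helpers. do_docker_call() is a hypothetical stand-in for any
# operation built on this client.

try:
    do_docker_call()  # hypothetical
except ImageNotFound as e:
    print('image missing:', e.explanation)
except APIError as e:
    if e.is_server_error():
        print('daemon-side failure:', e.status_code)
except DockerException as e:
    print('other client error:', e)
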
@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import ssl
import sys

from . import errors
from .transport.ssladapter import SSLHTTPAdapter


class TLSConfig(object):
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be ``False`` or a path to a CA cert
            file.
        ssl_version (int): A valid `SSL version`_.
        assert_hostname (bool): Verify the hostname of the server.

    .. _`SSL version`:
        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
    """
    cert = None
    ca_cert = None
    verify = None
    ssl_version = None

    def __init__(self, client_cert=None, ca_cert=None, verify=None,
                 ssl_version=None, assert_hostname=None):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving verify=False

        self.assert_hostname = assert_hostname

        # If the user provides an SSL version, we should use their preference
        if ssl_version:
            self.ssl_version = ssl_version
        elif (sys.version_info.major, sys.version_info.minor) < (3, 6):
            # If the user provides no ssl version, we should default to
            # TLSv1_2. This option is the most secure, and will work for the
            # majority of users with reasonably up-to-date software. However,
            # before doing so, detect openssl version to ensure we can support
            # it.
            if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
                    ssl, 'PROTOCOL_TLSv1_2'):
                # If the OpenSSL version is high enough to support TLSv1_2,
                # then we should use it.
                self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
            else:
                # Otherwise, TLS v1.0 seems to be the safest default;
                # SSLv23 fails in mysterious ways:
                # https://github.com/docker/docker-py/issues/963
                self.ssl_version = ssl.PROTOCOL_TLSv1
        else:
            self.ssl_version = ssl.PROTOCOL_TLS_CLIENT

        # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected but either is
        # missing.

        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    'client_cert must be a tuple of'
                    ' (client certificate, key file)'
                )

            if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
                                              not os.path.isfile(tls_key)):
                raise errors.TLSParameterError(
                    'Path to a certificate and key files must be provided'
                    ' through the client_cert param'
                )
            self.cert = (tls_cert, tls_key)

        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                'Invalid CA certificate provided for `ca_cert`.'
            )

    def configure_client(self, client):
        """
        Configure a client with these TLS options.
        """
        client.ssl_version = self.ssl_version

        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert

        client.mount('https://', SSLHTTPAdapter(
            ssl_version=self.ssl_version,
            assert_hostname=self.assert_hostname,
        ))
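
# A minimal usage sketch for TLSConfig with illustrative file paths (the
# constructor verifies that the files actually exist):

tls_config = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,
)
# Applying it to a requests-style client mounts SSLHTTPAdapter for https://:
#     tls_config.configure_client(session)
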
@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from .._import_helper import HTTPAdapter as _HTTPAdapter


class BaseHTTPAdapter(_HTTPAdapter):
    def close(self):
        super(BaseHTTPAdapter, self).close()
        if hasattr(self, 'pools'):
            self.pools.clear()

    # Hotfix for requests 2.32.0 and 2.32.1: its commit
    # https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
    # changes requests.adapters.HTTPAdapter to no longer call get_connection() from
    # send(), but instead call _get_connection().
    def _get_connection(self, request, *args, **kwargs):
        return self.get_connection(request.url, kwargs.get('proxies'))

    # Fix for requests 2.32.2+:
    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
        return self.get_connection(request.url, proxies)
@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.six.moves.queue import Empty

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection

from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket

RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class NpipeHTTPConnection(urllib3_connection.HTTPConnection, object):
    def __init__(self, npipe_path, timeout=60):
        super(NpipeHTTPConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(self, npipe_path, timeout=60, maxsize=10):
        super(NpipeHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        return NpipeHTTPConnection(
            self.npipe_path, self.timeout
        )

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")

        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            pass  # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
                                         'pools',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(NpipeHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
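
# A minimal sketch (Windows only; illustrative scheme and pipe path) of how
# this adapter is wired into a requests session. The Docker SDK mounts it for
# its 'http+docker://' scheme and talks to the Docker Desktop default pipe:

import requests

session = requests.Session()
session.mount('http+docker://', NpipeHTTPAdapter('npipe:////./pipe/docker_engine'))
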
@ -0,0 +1,259 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import functools
import io
import time
import traceback

from ansible.module_utils.six import PY2

PYWIN32_IMPORT_ERROR = None
try:
    import win32file
    import win32pipe
    import pywintypes
    import win32event
    import win32api
except ImportError:
    PYWIN32_IMPORT_ERROR = traceback.format_exc()


cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0

MAXIMUM_RETRY_COUNT = 10


def check_closed(f):
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        if self._closed:
            raise RuntimeError(
                'Can not reuse socket after connection was closed.'
            )
        return f(self, *args, **kwargs)
    return wrapped


class NpipeSocket(object):
    """ Partial implementation of the socket API over Windows named pipes.
        This implementation is only designed to be used as a client socket,
        and server-specific methods (bind, listen, accept...) are not
        implemented.
    """

    def __init__(self, handle=None):
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._closed = False

    def accept(self):
        raise NotImplementedError()

    def bind(self, address):
        raise NotImplementedError()

    def close(self):
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address, retry_count=0):
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                (cSECURITY_ANONYMOUS
                 | cSECURITY_SQOS_PRESENT
                 | win32file.FILE_FLAG_OVERLAPPED),
                0
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == cERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if (retry_count < MAXIMUM_RETRY_COUNT):
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address):
        return self.connect(address)

    @check_closed
    def detach(self):
        self._closed = True
        return self._handle

    @check_closed
    def dup(self):
        return NpipeSocket(self._handle)

    def getpeername(self):
        return self._address

    def getsockname(self):
        return self._address

    def getsockopt(self, level, optname, buflen=None):
        raise NotImplementedError()

    def ioctl(self, control, option):
        raise NotImplementedError()

    def listen(self, backlog):
        raise NotImplementedError()

    def makefile(self, mode=None, bufsize=None):
        if mode.strip('b') != 'r':
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize, flags=0):
        err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize, flags=0):
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(self, buf, nbytes=0, flags=0):
        return self.recv_into(buf, nbytes, flags), self._address

    @check_closed
    def recv_into(self, buf, nbytes=0):
        if PY2:
            return self._recv_into_py2(buf, nbytes)

        readbuf = buf
        if not isinstance(buf, memoryview):
            readbuf = memoryview(buf)

        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            err, data = win32file.ReadFile(
                self._handle,
                readbuf[:nbytes] if nbytes else readbuf,
                overlapped
            )
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    def _recv_into_py2(self, buf, nbytes):
        err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
        n = len(data)
        buf[:n] = data
        return n

    @check_closed
    def send(self, string, flags=0):
        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            win32file.WriteFile(self._handle, string, overlapped)
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def sendall(self, string, flags=0):
        return self.send(string, flags)

    @check_closed
    def sendto(self, string, address):
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag):
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value):
        if value is None:
            # Blocking mode
            self._timeout = win32event.INFINITE
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError('Timeout value out of range')
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = int(value * 1000)

    def gettimeout(self):
        return self._timeout

    def setsockopt(self, level, optname, value):
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how):
        return self.close()


class NpipeFileIOBase(io.RawIOBase):
    def __init__(self, npipe_socket):
        self.sock = npipe_socket

    def close(self):
        super(NpipeFileIOBase, self).close()
        self.sock = None

    def fileno(self):
        return self.sock.fileno()

    def isatty(self):
        return False

    def readable(self):
        return True

    def readinto(self, buf):
        return self.sock.recv_into(buf)

    def seekable(self):
        return False

    def writable(self):
        return False
@ -0,0 +1,270 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import logging
import os
import signal
import socket
import subprocess
import traceback

from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.queue import Empty
from ansible.module_utils.six.moves.urllib_parse import urlparse

from .basehttpadapter import BaseHTTPAdapter
from .. import constants

from .._import_helper import HTTPAdapter, urllib3, urllib3_connection

PARAMIKO_IMPORT_ERROR = None
try:
    import paramiko
except ImportError:
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class SSHSocket(socket.socket):
    def __init__(self, host):
        super(SSHSocket, self).__init__(
            socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        if ':' in self.host:
            self.host, self.port = self.host.split(':')
        if '@' in self.host:
            self.user, self.host = self.host.split('@')

        self.proc = None

    def connect(self, **kwargs):
        args = ['ssh']
        if self.user:
            args = args + ['-l', self.user]

        if self.port:
            args = args + ['-p', self.port]

        args = args + ['--', self.host, 'docker system dial-stdio']

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:
            def f():
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            preexec_func = f

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop('LD_LIBRARY_PATH', None)
        env.pop('SSL_CERT_FILE', None)

        self.proc = subprocess.Popen(
            args,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func)

    def _write(self, data):
        if not self.proc or self.proc.stdin.closed:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
        written = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return written

    def sendall(self, data):
        self._write(data)

    def send(self, data):
        return self._write(data)

    def recv(self, n):
        if not self.proc:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
        return self.proc.stdout.read(n)

    def makefile(self, mode):
        if not self.proc:
            self.connect()
        if PY3:
            self.proc.stdout.channel = self

        return self.proc.stdout

    def close(self):
        if not self.proc or self.proc.stdin.closed:
            return
        self.proc.stdin.write(b'\n\n')
        self.proc.stdin.flush()
        self.proc.terminate()


class SSHConnection(urllib3_connection.HTTPConnection, object):
    def __init__(self, ssh_transport=None, timeout=60, host=None):
        super(SSHConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host

    def connect(self):
        if self.ssh_transport:
            sock = self.ssh_transport.open_session()
            sock.settimeout(self.timeout)
            sock.exec_command('docker system dial-stdio')
        else:
            sock = SSHSocket(self.ssh_host)
            sock.settimeout(self.timeout)
            sock.connect()

        self.sock = sock


class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    scheme = 'ssh'

    def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
        super(SSHConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.ssh_transport = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self):
        return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")

        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            pass  # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class SSHHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + [
        'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
    ]

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
                 shell_out=False):
        self.ssh_client = None
        if not shell_out:
            self._create_paramiko_client(base_url)
            self._connect()

        self.ssh_host = base_url
        if base_url.startswith('ssh://'):
            self.ssh_host = base_url[len('ssh://'):]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(SSHHTTPAdapter, self).__init__()

    def _create_paramiko_client(self, base_url):
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url = urlparse(base_url)
        self.ssh_params = {
            "hostname": base_url.hostname,
            "port": base_url.port,
            "username": base_url.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file) as f:
                conf.parse(f)
            host_config = conf.lookup(base_url.hostname)
            if 'proxycommand' in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config['proxycommand']
                )
            if 'hostname' in host_config:
                self.ssh_params['hostname'] = host_config['hostname']
            if base_url.port is None and 'port' in host_config:
                self.ssh_params['port'] = host_config['port']
            if base_url.username is None and 'user' in host_config:
                self.ssh_params['username'] = host_config['user']
            if 'identityfile' in host_config:
                self.ssh_params['key_filename'] = host_config['identityfile']

        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self):
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(self, url, proxies=None):
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed; try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
            self.pools[url] = pool

        return pool

    def close(self):
        super(SSHHTTPAdapter, self).close()
        if self.ssh_client:
            self.ssh_client.close()
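
# A minimal sketch of the two modes above (illustrative host and scheme). With
# shell_out=True the local `ssh` CLI tunnels requests through
# `docker system dial-stdio`; with the default, paramiko connects directly:

import requests

session = requests.Session()
session.mount('http+docker://ssh', SSHHTTPAdapter(
    'ssh://user@docker-host.example.com', shell_out=True))
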
@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

""" Resolves OpenSSL issues in some servers:
      https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
      https://github.com/kennethreitz/requests/pull/799
"""

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter


PoolManager = urllib3.poolmanager.PoolManager


class SSLHTTPAdapter(BaseHTTPAdapter):
    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''

    __attrs__ = HTTPAdapter.__attrs__ + ['assert_hostname', 'ssl_version']

    def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        super(SSLHTTPAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        kwargs = {
            'num_pools': connections,
            'maxsize': maxsize,
            'block': block,
        }
        if self.assert_hostname is not None:
            kwargs['assert_hostname'] = self.assert_hostname
        if self.ssl_version and self.can_override_ssl_version():
            kwargs['ssl_version'] = self.ssl_version

        self.poolmanager = PoolManager(**kwargs)

    def get_connection(self, *args, **kwargs):
        """
        Ensure assert_hostname is set correctly on our pool

        We already take care of a normal poolmanager via init_poolmanager

        But we still need to take care of when there is a proxy poolmanager
        """
        conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
        if self.assert_hostname is not None and conn.assert_hostname != self.assert_hostname:
            conn.assert_hostname = self.assert_hostname
        return conn

    def can_override_ssl_version(self):
        urllib_ver = urllib3.__version__.split('-')[0]
        if urllib_ver is None:
            return False
        if urllib_ver == 'dev':
            return True
        return LooseVersion(urllib_ver) > LooseVersion('1.5')
@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import socket

from ansible.module_utils.six import PY2

from .basehttpadapter import BaseHTTPAdapter
from .. import constants

from .._import_helper import HTTPAdapter, urllib3, urllib3_connection


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(urllib3_connection.HTTPConnection, object):

    def __init__(self, base_url, unix_socket, timeout=60):
        super(UnixHTTPConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock

    def putheader(self, header, *values):
        super(UnixHTTPConnection, self).putheader(header, *values)
        if header == 'Connection' and 'Upgrade' in values:
            self.disable_buffering = True

    def response_class(self, sock, *args, **kwargs):
        if PY2:
            # FIXME: We may need to disable buffering on Py3 as well,
            # but there's no clear way to do it at the moment. See:
            # https://github.com/docker/docker-py/issues/1799
            kwargs['buffering'] = not self.disable_buffering

        return super(UnixHTTPConnection, self).response_class(sock, *args, **kwargs)


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
        super(UnixHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        return UnixHTTPConnection(
            self.base_url, self.socket_path, self.timeout
        )


class UnixHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + ['pools',
                                         'socket_path',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, socket_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        socket_path = socket_url.replace('http+unix://', '')
        if not socket_path.startswith('/'):
            socket_path = '/' + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(UnixHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
|
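
# A minimal usage sketch for the adapter above, assuming a daemon listening on
# the conventional /var/run/docker.sock path. The 'http+docker://' mount prefix
# is illustrative only; the collection's API client performs this wiring
# internally, so this is not the official entry point:
#
#   >>> import requests
#   >>> session = requests.Session()
#   >>> session.mount('http+docker://', UnixHTTPAdapter('http+unix:///var/run/docker.sock'))
#   >>> session.get('http+docker://localhost/_ping')  # doctest: +SKIP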
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import socket

from .._import_helper import urllib3

from ..errors import DockerException


class CancellableStream(object):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream, response):
        self._stream = stream
        self._response = response

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError:
            raise StopIteration
        except socket.error:
            raise StopIteration

    next = __next__

    def close(self):
        """
        Closes the event streaming.
        """

        if not self._response.raw.closed:
            # find the underlying socket object
            # based on api.client._get_raw_response_socket

            sock_fp = self._response.raw._fp.fp

            if hasattr(sock_fp, 'raw'):
                sock_raw = sock_fp.raw

                if hasattr(sock_raw, 'sock'):
                    sock = sock_raw.sock

                elif hasattr(sock_raw, '_sock'):
                    sock = sock_raw._sock

            elif hasattr(sock_fp, 'channel'):
                # We are working with a paramiko (SSH) channel, which does not
                # support cancelable streams with the current implementation
                raise DockerException(
                    'Cancellable streams not supported for the SSH protocol'
                )
            else:
                sock = sock_fp._sock

            if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
                    sock, urllib3.contrib.pyopenssl.WrappedSocket):
                sock = sock.socket

            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
@@ -0,0 +1,305 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import io
import os
import random
import re
import tarfile
import tempfile

from ansible.module_utils.six import PY3

from . import fnmatch
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX


_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')


def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
    root = os.path.abspath(path)
    exclude = exclude or []
    dockerfile = dockerfile or (None, None)
    extra_files = []
    if dockerfile[1] is not None:
        dockerignore_contents = '\n'.join(
            (exclude or ['.dockerignore']) + [dockerfile[0]]
        )
        extra_files = [
            ('.dockerignore', dockerignore_contents),
            dockerfile,
        ]
    return create_archive(
        files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
        root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
    )


def exclude_paths(root, patterns, dockerfile=None):
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """

    if dockerfile is None:
        dockerfile = 'Dockerfile'

    patterns.append('!' + dockerfile)
    pm = PatternMatcher(patterns)
    return set(pm.walk(root))


def build_file_list(root):
    files = []
    for dirname, dirnames, fnames in os.walk(root):
        for filename in fnames + dirnames:
            longpath = os.path.join(dirname, filename)
            files.append(
                longpath.replace(root, '', 1).lstrip('/')
            )

    return files


def create_archive(root, files=None, fileobj=None, gzip=False,
                   extra_files=None):
    extra_files = extra_files or []
    if not fileobj:
        fileobj = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
    if files is None:
        files = build_file_list(root)
    extra_names = set(e[0] for e in extra_files)
    for path in files:
        if path in extra_names:
            # Extra files override context files with the same name
            continue
        full_path = os.path.join(root, path)

        i = t.gettarinfo(full_path, arcname=path)
        if i is None:
            # This happens when we encounter a socket file. We can safely
            # ignore it and proceed.
            continue

        # Workaround https://bugs.python.org/issue32713
        if i.mtime < 0 or i.mtime > 8**11 - 1:
            i.mtime = int(i.mtime)

        if IS_WINDOWS_PLATFORM:
            # Windows does not keep track of the execute bit, so we make files
            # and directories executable by default.
            i.mode = i.mode & 0o755 | 0o111

        if i.isfile():
            try:
                with open(full_path, 'rb') as f:
                    t.addfile(i, f)
            except IOError:
                raise IOError(
                    'Can not read file in context: {0}'.format(full_path)
                )
        else:
            # Directories, FIFOs, symlinks... do not need to be read.
            t.addfile(i, None)

    for name, contents in extra_files:
        info = tarfile.TarInfo(name)
        contents_encoded = contents.encode('utf-8')
        info.size = len(contents_encoded)
        t.addfile(info, io.BytesIO(contents_encoded))

    t.close()
    fileobj.seek(0)
    return fileobj


def mkbuildcontext(dockerfile):
    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w', fileobj=f)
    if isinstance(dockerfile, io.StringIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        if PY3:
            raise TypeError('Please use io.BytesIO to create in-memory '
                            'Dockerfiles with Python 3')
        else:
            dfinfo.size = len(dockerfile.getvalue())
            dockerfile.seek(0)
    elif isinstance(dockerfile, io.BytesIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        dfinfo.size = len(dockerfile.getvalue())
        dockerfile.seek(0)
    else:
        dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
    t.addfile(dfinfo, dockerfile)
    t.close()
    f.seek(0)
    return f


def split_path(p):
    return [pt for pt in re.split(_SEP, p) if pt and pt != '.']


def normalize_slashes(p):
    if IS_WINDOWS_PLATFORM:
        return '/'.join(split_path(p))
    return p


def walk(root, patterns, default=True):
    pm = PatternMatcher(patterns)
    return pm.walk(root)


# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher(object):
    def __init__(self, patterns):
        self.patterns = list(filter(
            lambda p: p.dirs, [Pattern(p) for p in patterns]
        ))
        self.patterns.append(Pattern('!.dockerignore'))

    def matches(self, filepath):
        matched = False
        parent_path = os.path.dirname(filepath)
        parent_path_dirs = split_path(parent_path)

        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if not match and parent_path != '':
                if len(pattern.dirs) <= len(parent_path_dirs):
                    match = pattern.match(
                        os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
                    )

            if match:
                matched = not negative

        return matched

    def walk(self, root):
        def rec_walk(current_dir):
            for f in os.listdir(current_dir):
                fpath = os.path.join(
                    os.path.relpath(current_dir, root), f
                )
                if fpath.startswith('.' + os.path.sep):
                    fpath = fpath[2:]
                match = self.matches(fpath)
                if not match:
                    yield fpath

                cur = os.path.join(root, fpath)
                if not os.path.isdir(cur) or os.path.islink(cur):
                    continue

                if match:
                    # If we want to skip this file and it is a directory
                    # then we should first check to see if there's an
                    # excludes pattern (e.g. !dir/file) that starts with this
                    # dir. If so then we cannot skip this dir.
                    skip = True

                    for pat in self.patterns:
                        if not pat.exclusion:
                            continue
                        if pat.cleaned_pattern.startswith(
                                normalize_slashes(fpath)):
                            skip = False
                            break
                    if skip:
                        continue
                for sub in rec_walk(cur):
                    yield sub

        return rec_walk(root)


class Pattern(object):
    def __init__(self, pattern_str):
        self.exclusion = False
        if pattern_str.startswith('!'):
            self.exclusion = True
            pattern_str = pattern_str[1:]

        self.dirs = self.normalize(pattern_str)
        self.cleaned_pattern = '/'.join(self.dirs)

    @classmethod
    def normalize(cls, p):

        # Remove trailing spaces
        p = p.strip()

        # Leading and trailing slashes are not relevant. Yes,
        # "foo.py/" must exclude the "foo.py" regular file. "."
        # components are not relevant either, even if the whole
        # pattern is only ".", as the Docker reference states: "For
        # historical reasons, the pattern . is ignored."
        # ".." component must be cleared with the potential previous
        # component, regardless of whether it exists: "A preprocessing
        # step [...] eliminates . and .. elements using Go's
        # filepath.".
        i = 0
        split = split_path(p)
        while i < len(split):
            if split[i] == '..':
                del split[i]
                if i > 0:
                    del split[i - 1]
                    i -= 1
            else:
                i += 1
        return split

    def match(self, filepath):
        return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)


def process_dockerfile(dockerfile, path):
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if IS_WINDOWS_PLATFORM and path.startswith(
                WINDOWS_LONGPATH_PREFIX):
            abs_dockerfile = '{0}{1}'.format(
                WINDOWS_LONGPATH_PREFIX,
                os.path.normpath(
                    abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):]
                )
            )
    if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
            os.path.relpath(abs_dockerfile, path).startswith('..')):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile) as df:
            return (
                '.dockerfile.{random:x}'.format(random=random.getrandbits(160)),
                df.read()
            )

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
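
# Usage sketch for the build-context helpers above; the directory and patterns
# are hypothetical. tar() honours .dockerignore-style patterns, and negated
# patterns ('!...') re-include files that an earlier pattern excluded:
#
#   >>> archive = tar('/tmp/app', exclude=['*.pyc', '.git'])  # doctest: +SKIP
#   >>> sorted(exclude_paths('/tmp/app', ['*.md', '!README.md']))  # doctest: +SKIP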
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import logging
import os

from ..constants import IS_WINDOWS_PLATFORM

DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'

log = logging.getLogger(__name__)


def get_default_config_file():
    return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)


def find_config_file(config_path=None):
    homedir = home_dir()
    paths = list(filter(None, [
        config_path,  # 1
        config_path_from_environment(),  # 2
        os.path.join(homedir, DOCKER_CONFIG_FILENAME),  # 3
        os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME),  # 4
    ]))

    log.debug("Trying paths: %s", repr(paths))

    for path in paths:
        if os.path.exists(path):
            log.debug("Found file at path: %s", path)
            return path

    log.debug("No config file found")

    return None


def config_path_from_environment():
    config_dir = os.environ.get('DOCKER_CONFIG')
    if not config_dir:
        return None
    return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))


def home_dir():
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if IS_WINDOWS_PLATFORM:
        return os.environ.get('USERPROFILE', '')
    else:
        return os.path.expanduser('~')


def load_general_config(config_path=None):
    config_file = find_config_file(config_path)

    if not config_file:
        return {}

    try:
        with open(config_file) as f:
            return json.load(f)
    except (IOError, ValueError) as e:
        # In the case of a legacy `.dockercfg` file, we will not
        # be able to load any JSON data.
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
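
# Lookup-order sketch: an explicit path wins, then $DOCKER_CONFIG, then the
# files in the home directory. The values below are hypothetical:
#
#   >>> os.environ['DOCKER_CONFIG'] = '/tmp/dockercfg'  # doctest: +SKIP
#   >>> find_config_file()  # doctest: +SKIP
#   '/tmp/dockercfg/config.json'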
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import functools

from .. import errors
from . import utils


def check_resource(resource_name):
    def decorator(f):
        @functools.wraps(f)
        def wrapped(self, resource_id=None, *args, **kwargs):
            if resource_id is None and kwargs.get(resource_name):
                resource_id = kwargs.pop(resource_name)
            if isinstance(resource_id, dict):
                resource_id = resource_id.get('Id', resource_id.get('ID'))
            if not resource_id:
                raise errors.NullResource(
                    'Resource ID was not provided'
                )
            return f(self, resource_id, *args, **kwargs)
        return wrapped
    return decorator


def minimum_version(version):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if utils.version_lt(self._version, version):
                raise errors.InvalidVersion(
                    '{0} is not available for version < {1}'.format(
                        f.__name__, version
                    )
                )
            return f(self, *args, **kwargs)
        return wrapper
    return decorator


def update_headers(f):
    def inner(self, *args, **kwargs):
        if 'HttpHeaders' in self._general_configs:
            if not kwargs.get('headers'):
                kwargs['headers'] = self._general_configs['HttpHeaders']
            else:
                kwargs['headers'].update(self._general_configs['HttpHeaders'])
        return f(self, *args, **kwargs)
    return inner
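
# Sketch of how an API client method would typically stack these decorators.
# The class, endpoint, helpers (_get, _url), and version below are purely
# illustrative, not part of this module:
#
#   >>> class ExampleApiMixin(object):
#   ...     @minimum_version('1.25')
#   ...     @check_resource('container')
#   ...     def example_inspect(self, container):
#   ...         # 'container' may be an ID string or a dict with an 'Id' key
#   ...         return self._get(self._url('/containers/{0}/json', container))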
@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

"""Filename matching with shell patterns.

fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.

The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.

The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""

import re

__all__ = ["fnmatch", "fnmatchcase", "translate"]

_cache = {}
_MAXCACHE = 100


def _purge():
    """Clear the pattern cache"""
    _cache.clear()


def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you do not want this, use fnmatchcase(FILENAME, PATTERN).
    """

    name = name.lower()
    pat = pat.lower()
    return fnmatchcase(name, pat)


def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.
    This is a version of fnmatch() which does not case-normalize
    its arguments.
    """

    try:
        re_pat = _cache[pat]
    except KeyError:
        res = translate(pat)
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re_pat = re.compile(res)
    return re_pat.match(name) is not None


def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    i, n = 0, len(pat)
    res = '^'
    while i < n:
        c = pat[i]
        i = i + 1
        if c == '*':
            if i < n and pat[i] == '*':
                # is some flavor of "**"
                i = i + 1
                # Treat **/ as ** so eat the "/"
                if i < n and pat[i] == '/':
                    i = i + 1
                if i >= n:
                    # is "**EOF" - to align with .gitignore just accept all
                    res = res + '.*'
                else:
                    # is "**"
                    # Note that this allows for any # of /'s (even 0) because
                    # the .* will eat everything, even /'s
                    res = res + '(.*/)?'
            else:
                # is "*" so map it to anything but "/"
                res = res + '[^/]*'
        elif c == '?':
            # "?" is any char except "/"
            res = res + '[^/]'
        elif c == '[':
            j = i
            if j < n and pat[j] == '!':
                j = j + 1
            if j < n and pat[j] == ']':
                j = j + 1
            while j < n and pat[j] != ']':
                j = j + 1
            if j >= n:
                res = res + '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)

    return res + '$'
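
# Behaviour sketch for the Docker-specific '**' handling above:
#
#   >>> fnmatch('dir/sub/file.log', '**/*.log')
#   True
#   >>> translate('*.py')
#   '^[^/]*\\.py$'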
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import json.decoder

from ansible.module_utils.six import text_type

from ..errors import StreamParseError


json_decoder = json.JSONDecoder()


def stream_as_text(stream):
    """
    Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.
    This function can be removed once we return text streams
    instead of byte streams.
    """
    for data in stream:
        if not isinstance(data, text_type):
            data = data.decode('utf-8', 'replace')
        yield data


def json_splitter(buffer):
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    buffer = buffer.strip()
    try:
        obj, index = json_decoder.raw_decode(buffer)
        rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
        return obj, rest
    except ValueError:
        return None


def json_stream(stream):
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    return split_buffer(stream, json_splitter, json_decoder.decode)


def line_splitter(buffer, separator=u'\n'):
    index = buffer.find(text_type(separator))
    if index == -1:
        return None
    return buffer[:index + 1], buffer[index + 1:]


def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    splitter = splitter or line_splitter
    buffered = text_type('')

    for data in stream_as_text(stream):
        buffered += data
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            raise StreamParseError(e)
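
# A small sketch of reassembling JSON objects from arbitrarily chunked input,
# as produced by the daemon's progress streams:
#
#   >>> list(json_stream(['{"status": "pull', 'ing"}\n{"status"', ': "done"}']))
#   [{'status': 'pulling'}, {'status': 'done'}]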
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re

PORT_SPEC = re.compile(
    "^"  # Match full string
    "("  # External part
    r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?"  # Address
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
    ")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
    "(?P<proto>/(udp|tcp|sctp))?"  # Protocol
    "$"  # Match full string
)


def add_port_mapping(port_bindings, internal_port, external):
    if internal_port in port_bindings:
        port_bindings[internal_port].append(external)
    else:
        port_bindings[internal_port] = [external]


def add_port(port_bindings, internal_port_range, external_range):
    if external_range is None:
        for internal_port in internal_port_range:
            add_port_mapping(port_bindings, internal_port, None)
    else:
        ports = zip(internal_port_range, external_range)
        for internal_port, external_port in ports:
            add_port_mapping(port_bindings, internal_port, external_port)


def build_port_bindings(ports):
    port_bindings = {}
    for port in ports:
        internal_port_range, external_range = split_port(port)
        add_port(port_bindings, internal_port_range, external_range)
    return port_bindings


def _raise_invalid_port(port):
    raise ValueError('Invalid port "%s", should be '
                     '[[remote_ip:]remote_port[-remote_port]:]'
                     'port[/protocol]' % port)


def port_range(start, end, proto, randomly_available_port=False):
    if not start:
        return start
    if not end:
        return [start + proto]
    if randomly_available_port:
        return ['{0}-{1}'.format(start, end) + proto]
    return [str(port) + proto for port in range(int(start), int(end) + 1)]


def split_port(port):
    if hasattr(port, 'legacy_repr'):
        # This is the worst hack, but it prevents a bug in Compose 1.14.0
        # https://github.com/docker/docker-py/issues/1668
        # TODO: remove once fixed in Compose stable
        port = port.legacy_repr()
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    parts = match.groupdict()

    host = parts['host']
    proto = parts['proto'] or ''
    internal = port_range(parts['int'], parts['int_end'], proto)
    external = port_range(
        parts['ext'], parts['ext_end'], '', len(internal) == 1)

    if host is None:
        if external is not None and len(internal) != len(external):
            raise ValueError('Port ranges don\'t match in length')
        return internal, external
    else:
        if not external:
            external = [None] * len(internal)
        elif len(internal) != len(external):
            raise ValueError('Port ranges don\'t match in length')
        return internal, [(host, ext_port) for ext_port in external]
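
# Examples of the port specification grammar accepted by split_port():
#
#   >>> split_port('127.0.0.1:8080:80/tcp')
#   (['80/tcp'], [('127.0.0.1', '8080')])
#   >>> split_port('8080-8081:80-81')
#   (['80', '81'], ['8080', '8081'])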
@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from .utils import format_environment


class ProxyConfig(dict):
    '''
    Hold the client's proxy configuration
    '''
    @property
    def http(self):
        return self.get('http')

    @property
    def https(self):
        return self.get('https')

    @property
    def ftp(self):
        return self.get('ftp')

    @property
    def no_proxy(self):
        return self.get('no_proxy')

    @staticmethod
    def from_dict(config):
        '''
        Instantiate a new ProxyConfig from a dictionary that represents a
        client configuration, as described in `the documentation`_.

        .. _the documentation:
            https://docs.docker.com/network/proxy/#configure-the-docker-client
        '''
        return ProxyConfig(
            http=config.get('httpProxy'),
            https=config.get('httpsProxy'),
            ftp=config.get('ftpProxy'),
            no_proxy=config.get('noProxy'),
        )

    def get_environment(self):
        '''
        Return a dictionary representing the environment variables used to
        set the proxy settings.
        '''
        env = {}
        if self.http:
            env['http_proxy'] = env['HTTP_PROXY'] = self.http
        if self.https:
            env['https_proxy'] = env['HTTPS_PROXY'] = self.https
        if self.ftp:
            env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
        if self.no_proxy:
            env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
        return env

    def inject_proxy_environment(self, environment):
        '''
        Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        '''
        if not self:
            return environment

        proxy_env = format_environment(self.get_environment())
        if not environment:
            return proxy_env
        # It is important to prepend our variables, because we want the
        # variables defined in "environment" to take precedence.
        return proxy_env + environment

    def __str__(self):
        return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format(
            self.http, self.https, self.ftp, self.no_proxy)
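
# Sketch: how a client config's proxy section becomes container environment
# variables (the output ordering assumes insertion-ordered dicts, Python 3.7+):
#
#   >>> cfg = ProxyConfig.from_dict({'httpProxy': 'http://proxy:3128'})
#   >>> cfg.inject_proxy_environment(['FOO=bar'])
#   ['http_proxy=http://proxy:3128', 'HTTP_PROXY=http://proxy:3128', 'FOO=bar']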
@@ -0,0 +1,199 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import errno
import os
import select
import socket as pysocket
import struct

from ansible.module_utils.six import PY3, binary_type

from ..transport.npipesocket import NpipeSocket


STDOUT = 1
STDERR = 2


class SocketError(Exception):
    pass


# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109


def read(socket, n=4096):
    """
    Reads at most n bytes from socket
    """

    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    if PY3 and not isinstance(socket, NpipeSocket):
        if not hasattr(select, "poll"):
            # Limited to 1024
            select.select([socket], [], [])
        else:
            poll = select.poll()
            poll.register(socket, select.POLLIN | select.POLLPRI)
            poll.poll()

    try:
        if hasattr(socket, 'recv'):
            return socket.recv(n)
        if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
            return socket.read(n)
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise
    except Exception as e:
        is_pipe_ended = (isinstance(socket, NpipeSocket) and
                         len(e.args) > 0 and
                         e.args[0] == NPIPE_ENDED)
        if is_pipe_ended:
            # npipes do not support duplex sockets, so we interpret
            # a PIPE_ENDED error as a close operation (0-length read).
            return ''
        raise


def read_exactly(socket, n):
    """
    Reads exactly n bytes from socket
    Raises SocketError if there is not enough data
    """
    data = binary_type()
    while len(data) < n:
        next_data = read(socket, n - len(data))
        if not next_data:
            raise SocketError("Unexpected EOF")
        data += next_data
    return data


def next_frame_header(socket):
    """
    Returns the stream and size of the next frame of data waiting to be read
    from socket, according to the protocol defined here:

    https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
    """
    try:
        data = read_exactly(socket, 8)
    except SocketError:
        return (-1, -1)

    stream, actual = struct.unpack('>BxxxL', data)
    return (stream, actual)


def frames_iter(socket, tty):
    """
    Return a generator of frames read from socket. A frame is a tuple where
    the first item is the stream number and the second item is a chunk of data.

    If the tty setting is enabled, the streams are multiplexed into the stdout
    stream.
    """
    if tty:
        return ((STDOUT, frame) for frame in frames_iter_tty(socket))
    else:
        return frames_iter_no_tty(socket)


def frames_iter_no_tty(socket):
    """
    Returns a generator of data read from the socket when the tty setting is
    not enabled.
    """
    while True:
        (stream, n) = next_frame_header(socket)
        if n < 0:
            break
        while n > 0:
            result = read(socket, n)
            if result is None:
                continue
            data_length = len(result)
            if data_length == 0:
                # We have reached EOF
                return
            n -= data_length
            yield (stream, result)


def frames_iter_tty(socket):
    """
    Return a generator of data read from the socket when the tty setting is
    enabled.
    """
    while True:
        result = read(socket)
        if len(result) == 0:
            # We have reached EOF
            return
        yield result


def consume_socket_output(frames, demux=False):
    """
    Iterate through frames read from the socket and return the result.

    Args:

        demux (bool):
            If False, stdout and stderr are multiplexed, and the result is the
            concatenation of all the frames. If True, the streams are
            demultiplexed, and the result is a 2-tuple where each item is the
            concatenation of frames belonging to the same stream.
    """
    if demux is False:
        # If the streams are multiplexed, the generator returns strings, that
        # we just need to concatenate.
        return binary_type().join(frames)

    # If the streams are demultiplexed, the generator yields tuples
    # (stdout, stderr)
    out = [None, None]
    for frame in frames:
        # It is guaranteed that for each frame, one and only one stream
        # is not None.
        if frame == (None, None):
            raise AssertionError('frame must not be (None, None), but got %s' % (frame, ))
        if frame[0] is not None:
            if out[0] is None:
                out[0] = frame[0]
            else:
                out[0] += frame[0]
        else:
            if out[1] is None:
                out[1] = frame[1]
            else:
                out[1] += frame[1]
    return tuple(out)


def demux_adaptor(stream_id, data):
    """
    Utility to demultiplex stdout and stderr when reading frames from the
    socket.
    """
    if stream_id == STDOUT:
        return (data, None)
    elif stream_id == STDERR:
        return (None, data)
    else:
        raise ValueError('{0} is not a valid stream'.format(stream_id))
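
# Demultiplexing sketch: frames tagged with the stream constants above are
# folded into a (stdout, stderr) tuple:
#
#   >>> frames = [demux_adaptor(STDOUT, b'out'), demux_adaptor(STDERR, b'err'), demux_adaptor(STDOUT, b'!')]
#   >>> consume_socket_output(frames, demux=True)
#   (b'out!', b'err')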
@@ -0,0 +1,528 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import base64
import collections
import json
import os
import os.path
import shlex
import string
from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion

from ansible.module_utils.six import PY2, PY3, binary_type, integer_types, iteritems, string_types, text_type

from .. import errors
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from ..tls import TLSConfig

if PY2:
    from urlparse import urlparse, urlunparse
else:
    from urllib.parse import urlparse, urlunparse


URLComponents = collections.namedtuple(
    'URLComponents',
    'scheme netloc url params query fragment',
)


def create_ipam_pool(*args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_ipam_pool has been removed. Please use a '
        'docker.types.IPAMPool object instead.'
    )


def create_ipam_config(*args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_ipam_config has been removed. Please use a '
        'docker.types.IPAMConfig object instead.'
    )


def decode_json_header(header):
    data = base64.b64decode(header)
    if PY3:
        data = data.decode('utf-8')
    return json.loads(data)


def compare_version(v1, v2):
    """Compare docker versions

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    elif s1 > s2:
        return -1
    else:
        return 1


def version_lt(v1, v2):
    return compare_version(v1, v2) > 0


def version_gte(v1, v2):
    return not version_lt(v1, v2)


def _convert_port_binding(binding):
    result = {'HostIp': '', 'HostPort': ''}
    if isinstance(binding, tuple):
        if len(binding) == 2:
            result['HostPort'] = binding[1]
            result['HostIp'] = binding[0]
        elif isinstance(binding[0], string_types):
            result['HostIp'] = binding[0]
        else:
            result['HostPort'] = binding[0]
    elif isinstance(binding, dict):
        if 'HostPort' in binding:
            result['HostPort'] = binding['HostPort']
            if 'HostIp' in binding:
                result['HostIp'] = binding['HostIp']
        else:
            raise ValueError(binding)
    else:
        result['HostPort'] = binding

    if result['HostPort'] is None:
        result['HostPort'] = ''
    else:
        result['HostPort'] = str(result['HostPort'])

    return result


def convert_port_bindings(port_bindings):
    result = {}
    for k, v in iteritems(port_bindings):
        key = str(k)
        if '/' not in key:
            key += '/tcp'
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result


def convert_volume_binds(binds):
    if isinstance(binds, list):
        return binds

    result = []
    for k, v in binds.items():
        if isinstance(k, binary_type):
            k = k.decode('utf-8')

        if isinstance(v, dict):
            if 'ro' in v and 'mode' in v:
                raise ValueError(
                    'Binding cannot contain both "ro" and "mode": {0}'
                    .format(repr(v))
                )

            bind = v['bind']
            if isinstance(bind, binary_type):
                bind = bind.decode('utf-8')

            if 'ro' in v:
                mode = 'ro' if v['ro'] else 'rw'
            elif 'mode' in v:
                mode = v['mode']
            else:
                mode = 'rw'

            # NOTE: this is only relevant for Linux hosts
            # (does not apply in Docker Desktop)
            propagation_modes = [
                'rshared',
                'shared',
                'rslave',
                'slave',
                'rprivate',
                'private',
            ]
            if 'propagation' in v and v['propagation'] in propagation_modes:
                if mode:
                    mode = ','.join([mode, v['propagation']])
                else:
                    mode = v['propagation']

            result.append(
                text_type('{0}:{1}:{2}').format(k, bind, mode)
            )
        else:
            if isinstance(v, binary_type):
                v = v.decode('utf-8')
            result.append(
                text_type('{0}:{1}:rw').format(k, v)
            )
    return result


def convert_tmpfs_mounts(tmpfs):
    if isinstance(tmpfs, dict):
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            'Expected tmpfs value to be either a list or a dict, found: {0}'
            .format(type(tmpfs).__name__)
        )

    result = {}
    for mount in tmpfs:
        if isinstance(mount, string_types):
            if ":" in mount:
                name, options = mount.split(":", 1)
            else:
                name = mount
                options = ""

        else:
            raise ValueError(
                "Expected item in tmpfs list to be a string, found: {0}"
                .format(type(mount).__name__)
            )

        result[name] = options
    return result
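
# Conversion sketches for the helpers above:
#
#   >>> convert_volume_binds({'/host/data': {'bind': '/data', 'mode': 'ro'}})
#   ['/host/data:/data:ro']
#   >>> convert_tmpfs_mounts(['/run:rw,size=64m'])
#   {'/run': 'rw,size=64m'}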


def convert_service_networks(networks):
    if not networks:
        return networks
    if not isinstance(networks, list):
        raise TypeError('networks parameter must be a list.')

    result = []
    for n in networks:
        if isinstance(n, string_types):
            n = {'Target': n}
        result.append(n)
    return result


def parse_repository_tag(repo_name):
    parts = repo_name.rsplit('@', 1)
    if len(parts) == 2:
        return tuple(parts)
    parts = repo_name.rsplit(':', 1)
    if len(parts) == 2 and '/' not in parts[1]:
        return tuple(parts)
    return repo_name, None


def parse_host(addr, is_win32=False, tls=False):
    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any(x not in string.ascii_letters + '+' for x in proto):
        # https://bugs.python.org/issue754016
        parsed_url = urlparse('//' + addr, 'tcp')
        proto = 'tcp'

    if proto == 'fd':
        raise errors.DockerException('fd protocol is not implemented')

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto == 'http' or proto == 'https':
        tls = proto == 'https'
        proto = 'tcp'
    elif proto == 'http+unix':
        proto = 'unix'

    if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
        raise errors.DockerException(
            "Invalid bind address protocol: {0}".format(addr)
        )

    if proto == 'tcp' and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(
            'Invalid bind address format: {0}'.format(addr)
        )

    if any([
        parsed_url.params, parsed_url.query, parsed_url.fragment,
        parsed_url.password
    ]):
        raise errors.DockerException(
            'Invalid bind address format: {0}'.format(addr)
        )

    if parsed_url.path and proto == 'ssh':
        raise errors.DockerException(
            'Invalid bind address format: no path allowed for this protocol:'
            ' {0}'.format(addr)
        )
    else:
        path = parsed_url.path
        if proto == 'unix' and parsed_url.hostname is not None:
            # For legacy reasons, we consider unix://path
            # to be valid and equivalent to unix:///path
            path = '/'.join((parsed_url.hostname, path))

    netloc = parsed_url.netloc
    if proto in ('tcp', 'ssh'):
        port = parsed_url.port or 0
        if port <= 0:
            port = 22 if proto == 'ssh' else (2376 if tls else 2375)
            netloc = '{0}:{1}'.format(parsed_url.netloc, port)

        if not parsed_url.hostname:
            netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port)

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == 'tcp':
        proto = 'http{0}'.format('s' if tls else '')
    elif proto == 'unix':
        proto = 'http+unix'

    if proto in ('http+unix', 'npipe'):
        return "{0}://{1}".format(proto, path).rstrip('/')
    return urlunparse(URLComponents(
        scheme=proto,
        netloc=netloc,
        url=path,
        params='',
        query='',
        fragment='',
    )).rstrip('/')
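
# parse_host() rewrites Docker host URLs into the scheme the transport
# adapters expect; the unix-socket output below assumes the usual value of
# the DEFAULT_UNIX_SOCKET constant:
#
#   >>> parse_host('tcp://127.0.0.1:2376', tls=True)
#   'https://127.0.0.1:2376'
#   >>> parse_host(None)  # doctest: +SKIP
#   'http+unix:///var/run/docker.sock'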


def parse_devices(devices):
    device_list = []
    for device in devices:
        if isinstance(device, dict):
            device_list.append(device)
            continue
        if not isinstance(device, string_types):
            raise errors.DockerException(
                'Invalid device type {0}'.format(type(device))
            )
        device_mapping = device.split(':')
        if device_mapping:
            path_on_host = device_mapping[0]
            if len(device_mapping) > 1:
                path_in_container = device_mapping[1]
            else:
                path_in_container = path_on_host
            if len(device_mapping) > 2:
                permissions = device_mapping[2]
            else:
                permissions = 'rwm'
            device_list.append({
                'PathOnHost': path_on_host,
                'PathInContainer': path_in_container,
                'CgroupPermissions': permissions
            })
    return device_list


def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
    if not environment:
        environment = os.environ
    host = environment.get('DOCKER_HOST')

    # empty string for cert path is the same as unset.
    cert_path = environment.get('DOCKER_CERT_PATH') or None

    # empty string for tls verify counts as "false".
    # Any other value counts as true; leaving the variable unset counts as false.
    tls_verify = environment.get('DOCKER_TLS_VERIFY')
    if tls_verify == '':
        tls_verify = False
    else:
        tls_verify = tls_verify is not None
    enable_tls = cert_path or tls_verify

    params = {}

    if host:
        params['base_url'] = host

    if not enable_tls:
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser('~'), '.docker')

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it is not set already then set it to false.
        assert_hostname = False

    params['tls'] = TLSConfig(
        client_cert=(os.path.join(cert_path, 'cert.pem'),
                     os.path.join(cert_path, 'key.pem')),
        ca_cert=os.path.join(cert_path, 'ca.pem'),
        verify=tls_verify,
        ssl_version=ssl_version,
        assert_hostname=assert_hostname,
    )

    return params


def convert_filters(filters):
    result = {}
    for k, v in iteritems(filters):
        if isinstance(v, bool):
            v = 'true' if v else 'false'
        if not isinstance(v, list):
            v = [v, ]
        result[k] = [
            str(item) if not isinstance(item, string_types) else item
            for item in v
        ]
    return json.dumps(result)


def parse_bytes(s):
    if isinstance(s, integer_types + (float,)):
        return s
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha():
        if s[-1] == "b" or s[-1] == "B":
            s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = 'b'
    else:
        digits_part = s[:-1]

    if suffix in units.keys() or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError:
            raise errors.DockerException(
                'Failed converting the string value for memory ({0}) to'
                ' an integer.'.format(digits_part)
            )

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            'The specified value for memory ({0}) should specify the'
            ' units. The postfix should be one of the `b` `k` `m` `g`'
            ' characters'.format(s)
        )

    return s
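
# parse_bytes() accepts the b/k/m/g suffixes (powers of 1024):
#
#   >>> parse_bytes('512m')
#   536870912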


def normalize_links(links):
    if isinstance(links, dict):
        links = iteritems(links)

    return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]


def parse_env_file(env_file):
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, 'r') as f:
        for line in f:

            if line[0] == '#':
                continue

            line = line.strip()
            if not line:
                continue

            parse_line = line.split('=', 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    'Invalid line in environment file {0}:\n{1}'.format(
                        env_file, line))

    return environment


def split_command(command):
    if PY2 and not isinstance(command, binary_type):
        command = command.encode('utf-8')
    return shlex.split(command)


def format_environment(environment):
    def format_env(key, value):
        if value is None:
            return key
        if isinstance(value, binary_type):
            value = value.decode('utf-8')

        return u'{key}={value}'.format(key=key, value=value)
    return [format_env(*var) for var in iteritems(environment)]


def format_extra_hosts(extra_hosts, task=False):
    # Use format dictated by Swarm API if container is part of a task
    if task:
        return [
            '{0} {1}'.format(v, k) for k, v in sorted(iteritems(extra_hosts))
        ]

    return [
        '{0}:{1}'.format(k, v) for k, v in sorted(iteritems(extra_hosts))
    ]


def create_host_config(self, *args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_host_config has been removed. Please use a '
        'docker.types.HostConfig object instead.'
    )
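
# Formatting sketches for the link and extra-hosts helpers above:
#
#   >>> normalize_links({'db': 'database', 'cache': None})
#   ['cache', 'db:database']
#   >>> format_extra_hosts({'web': '10.0.0.2'})
#   ['web:10.0.0.2']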
@ -0,0 +1,208 @@
|
||||
# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Parse go logfmt messages.

See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
# (look for "EBNFish")


class InvalidLogFmt(Exception):
    pass


class _Mode(object):
    GARBAGE = 0
    KEY = 1
    EQUAL = 2
    IDENT_VALUE = 3
    QUOTED_VALUE = 4


_ESCAPE_DICT = {
    '"': '"',
    '\\': '\\',
    "'": "'",
    '/': '/',
    'b': '\b',
    'f': '\f',
    'n': '\n',
    'r': '\r',
    't': '\t',
}

_HEX_DICT = {
    '0': 0,
    '1': 1,
    '2': 2,
    '3': 3,
    '4': 4,
    '5': 5,
    '6': 6,
    '7': 7,
    '8': 8,
    '9': 9,
    'a': 0xA,
    'b': 0xB,
    'c': 0xC,
    'd': 0xD,
    'e': 0xE,
    'f': 0xF,
    'A': 0xA,
    'B': 0xB,
    'C': 0xC,
    'D': 0xD,
    'E': 0xE,
    'F': 0xF,
}


def _is_ident(cur):
    return cur > ' ' and cur not in ('"', '=')


class _Parser(object):
    def __init__(self, line):
        self.line = line
        self.index = 0
        self.length = len(line)

    def done(self):
        return self.index >= self.length

    def cur(self):
        return self.line[self.index]

    def next(self):
        self.index += 1

    def prev(self):
        self.index -= 1

    def parse_unicode_sequence(self):
        if self.index + 6 > self.length:
            raise InvalidLogFmt('Not enough space for unicode escape')
        if self.line[self.index:self.index + 2] != '\\u':
            raise InvalidLogFmt('Invalid unicode escape start')
        v = 0
        for i in range(self.index + 2, self.index + 6):
            v <<= 4
            try:
                v += _HEX_DICT[self.line[i]]
            except KeyError:
                raise InvalidLogFmt('Invalid unicode escape digit {digit!r}'.format(digit=self.line[i]))
        self.index += 6
        return chr(v)


def parse_line(line, logrus_mode=False):
    result = {}
    parser = _Parser(line)
    key = []
    value = []
    mode = _Mode.GARBAGE

    def handle_kv(has_no_value=False):
        k = ''.join(key)
        v = None if has_no_value else ''.join(value)
        result[k] = v
        del key[:]
        del value[:]

    def parse_garbage(cur):
        if _is_ident(cur):
            return _Mode.KEY
        parser.next()
        return _Mode.GARBAGE

    def parse_key(cur):
        if _is_ident(cur):
            key.append(cur)
            parser.next()
            return _Mode.KEY
        elif cur == '=':
            parser.next()
            return _Mode.EQUAL
        else:
            if logrus_mode:
                raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
            handle_kv(has_no_value=True)
            parser.next()
            return _Mode.GARBAGE

    def parse_equal(cur):
        if _is_ident(cur):
            value.append(cur)
            parser.next()
            return _Mode.IDENT_VALUE
        elif cur == '"':
            parser.next()
            return _Mode.QUOTED_VALUE
        else:
            handle_kv()
            parser.next()
            return _Mode.GARBAGE

    def parse_ident_value(cur):
        if _is_ident(cur):
            value.append(cur)
            parser.next()
            return _Mode.IDENT_VALUE
        else:
            handle_kv()
            parser.next()
            return _Mode.GARBAGE

    def parse_quoted_value(cur):
        if cur == '\\':
            parser.next()
            if parser.done():
                raise InvalidLogFmt('Unterminated escape sequence in quoted string')
            cur = parser.cur()
            if cur in _ESCAPE_DICT:
                value.append(_ESCAPE_DICT[cur])
            elif cur != 'u':
                raise InvalidLogFmt('Unknown escape sequence {seq!r}'.format(seq='\\' + cur))
            else:
                parser.prev()
                value.append(parser.parse_unicode_sequence())
                # parse_unicode_sequence() already advanced past the whole
                # escape; step back so the shared next() below lands right.
                parser.prev()
            parser.next()
            return _Mode.QUOTED_VALUE
        elif cur == '"':
            handle_kv()
            parser.next()
            return _Mode.GARBAGE
        elif cur < ' ':
            raise InvalidLogFmt('Control characters in quoted string are not allowed')
        else:
            value.append(cur)
            parser.next()
            return _Mode.QUOTED_VALUE

    parsers = {
        _Mode.GARBAGE: parse_garbage,
        _Mode.KEY: parse_key,
        _Mode.EQUAL: parse_equal,
        _Mode.IDENT_VALUE: parse_ident_value,
        _Mode.QUOTED_VALUE: parse_quoted_value,
    }
    while not parser.done():
        mode = parsers[mode](parser.cur())
    if mode == _Mode.KEY and logrus_mode:
        raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
    if mode == _Mode.KEY or mode == _Mode.EQUAL:
        handle_kv(has_no_value=True)
    elif mode == _Mode.IDENT_VALUE:
        handle_kv()
    elif mode == _Mode.QUOTED_VALUE:
        raise InvalidLogFmt('Unterminated quoted string')
    return result
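# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition, not part of the original commit).
# Assuming this module is importable as `_logfmt`, parse_line() turns one
# logfmt record into a dict; quoted values are unescaped, and in logrus mode
# a key without "=" is an error instead of a value-less key:
#
#     from ansible_collections.community.docker.plugins.module_utils import _logfmt
#
#     record = _logfmt.parse_line('time="2023-01-01T00:00:00Z" level=info msg="hello world"')
#     assert record == {'time': '2023-01-01T00:00:00Z', 'level': 'info', 'msg': 'hello world'}
#
#     assert _logfmt.parse_line('debug')['debug'] is None  # value-less key
# ---------------------------------------------------------------------------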
@ -0,0 +1,179 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on containerd's platforms Go module
# (https://github.com/containerd/containerd/tree/main/platforms)
#
# Copyright (c) 2023 Felix Fontein <felix@fontein.de>
# Copyright The containerd Authors
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re


_VALID_STR = re.compile('^[A-Za-z0-9_-]+$')


def _validate_part(string, part, part_name):
    if not part:
        raise ValueError('Invalid platform string "{string}": {part} is empty'.format(string=string, part=part_name))
    if not _VALID_STR.match(part):
        raise ValueError('Invalid platform string "{string}": {part} has invalid characters'.format(string=string, part=part_name))
    return part


# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L32-L38
_KNOWN_OS = (
    "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js",
    "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos",
)

# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L54-L60
_KNOWN_ARCH = (
    "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le",
    "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le",
    "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm",
)


def _normalize_os(os_str):
    # See normalizeOS() in https://github.com/containerd/containerd/blob/main/platforms/database.go
    os_str = os_str.lower()
    if os_str == 'macos':
        os_str = 'darwin'
    return os_str


_NORMALIZE_ARCH = {
    ("i386", None): ("386", ""),
    ("x86_64", "v1"): ("amd64", ""),
    ("x86-64", "v1"): ("amd64", ""),
    ("amd64", "v1"): ("amd64", ""),
    ("x86_64", None): ("amd64", None),
    ("x86-64", None): ("amd64", None),
    ("amd64", None): ("amd64", None),
    ("aarch64", "8"): ("arm64", ""),
    ("arm64", "8"): ("arm64", ""),
    ("aarch64", "v8"): ("arm64", ""),
    ("arm64", "v8"): ("arm64", ""),
    ("aarch64", None): ("arm64", None),
    ("arm64", None): ("arm64", None),
    ("armhf", None): ("arm", "v7"),
    ("armel", None): ("arm", "v6"),
    ("arm", ""): ("arm", "v7"),
    ("arm", "5"): ("arm", "v5"),
    ("arm", "6"): ("arm", "v6"),
    ("arm", "7"): ("arm", "v7"),
    ("arm", "8"): ("arm", "v8"),
    ("arm", None): ("arm", None),
}


def _normalize_arch(arch_str, variant_str):
    # See normalizeArch() in https://github.com/containerd/containerd/blob/main/platforms/database.go
    arch_str = arch_str.lower()
    variant_str = variant_str.lower()
    res = _NORMALIZE_ARCH.get((arch_str, variant_str))
    if res is None:
        res = _NORMALIZE_ARCH.get((arch_str, None))
    if res is None:
        return arch_str, variant_str
    arch_str = res[0]
    if res[1] is not None:
        variant_str = res[1]
    return arch_str, variant_str


class _Platform(object):
    def __init__(self, os=None, arch=None, variant=None):
        self.os = os
        self.arch = arch
        self.variant = variant
        if variant is not None:
            if arch is None:
                raise ValueError('If variant is given, architecture must be given too')
            if os is None:
                raise ValueError('If variant is given, os must be given too')

    @classmethod
    def parse_platform_string(cls, string, daemon_os=None, daemon_arch=None):
        # See Parse() in https://github.com/containerd/containerd/blob/main/platforms/platforms.go
        if string is None:
            return cls()
        if not string:
            raise ValueError('Platform string must be non-empty')
        parts = string.split('/', 2)
        arch = None
        variant = None
        if len(parts) == 1:
            _validate_part(string, string, 'OS/architecture')
            # The part is either OS or architecture
            os = _normalize_os(string)
            if os in _KNOWN_OS:
                if daemon_arch is not None:
                    arch, variant = _normalize_arch(daemon_arch, '')
                return cls(os=os, arch=arch, variant=variant)
            arch, variant = _normalize_arch(os, '')
            if arch in _KNOWN_ARCH:
                return cls(
                    os=_normalize_os(daemon_os) if daemon_os else None,
                    arch=arch or None,
                    variant=variant or None,
                )
            raise ValueError('Invalid platform string "{0}": unknown OS or architecture'.format(string))
        os = _validate_part(string, parts[0], 'OS')
        if not os:
            raise ValueError('Invalid platform string "{0}": OS is empty'.format(string))
        arch = _validate_part(string, parts[1], 'architecture') if len(parts) > 1 else None
        if arch is not None and not arch:
            raise ValueError('Invalid platform string "{0}": architecture is empty'.format(string))
        variant = _validate_part(string, parts[2], 'variant') if len(parts) > 2 else None
        if variant is not None and not variant:
            raise ValueError('Invalid platform string "{0}": variant is empty'.format(string))
        arch, variant = _normalize_arch(arch, variant or '')
        if len(parts) == 2 and arch == 'arm' and variant == 'v7':
            variant = None
        if len(parts) == 3 and arch == 'arm64' and variant == '':
            variant = 'v8'
        return cls(os=_normalize_os(os), arch=arch, variant=variant or None)

    def __str__(self):
        if self.variant:
            parts = [self.os, self.arch, self.variant]
        elif self.os:
            if self.arch:
                parts = [self.os, self.arch]
            else:
                parts = [self.os]
        elif self.arch is not None:
            parts = [self.arch]
        else:
            parts = []
        return '/'.join(parts)

    def __repr__(self):
        return '_Platform(os={os!r}, arch={arch!r}, variant={variant!r})'.format(os=self.os, arch=self.arch, variant=self.variant)

    def __eq__(self, other):
        return self.os == other.os and self.arch == other.arch and self.variant == other.variant


def normalize_platform_string(string, daemon_os=None, daemon_arch=None):
    return str(_Platform.parse_platform_string(string, daemon_os=daemon_os, daemon_arch=daemon_arch))


def compose_platform_string(os=None, arch=None, variant=None, daemon_os=None, daemon_arch=None):
    if os is None and daemon_os is not None:
        os = _normalize_os(daemon_os)
    if arch is None and daemon_arch is not None:
        arch, variant = _normalize_arch(daemon_arch, variant or '')
        variant = variant or None
    return str(_Platform(os=os, arch=arch, variant=variant or None))


def compare_platform_strings(string1, string2):
    return _Platform.parse_platform_string(string1) == _Platform.parse_platform_string(string2)
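# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition, not part of the original commit).
# The normalization mirrors containerd's rules; assuming this module is
# importable as `_platform`, the following follow from the tables above:
#
#     from ansible_collections.community.docker.plugins.module_utils import _platform
#
#     assert _platform.normalize_platform_string('x86_64') == 'amd64'
#     assert _platform.normalize_platform_string('linux/aarch64') == 'linux/arm64'
#     # armhf maps to arm/v7, and a two-part string drops the implied v7:
#     assert _platform.normalize_platform_string('linux/armhf') == 'linux/arm'
#     assert _platform.compare_platform_strings('linux/arm64', 'linux/aarch64')
# ---------------------------------------------------------------------------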
@ -0,0 +1,56 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import base64
import random

from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import PY2


def generate_insecure_key():
    '''Do NOT use this for cryptographic purposes!'''
    while True:
        # Generate a one-byte key. Right now the functions below do not use more
        # than one byte, so this is sufficient.
        if PY2:
            key = chr(random.randint(0, 255))
        else:
            key = bytes([random.randint(0, 255)])
        # Return anything that is not zero
        if key != b'\x00':
            return key


def scramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    value = to_bytes(value)
    if PY2:
        k = ord(key[0])
        value = b''.join([chr(k ^ ord(b)) for b in value])
    else:
        k = key[0]
        value = bytes([k ^ b for b in value])
    return '=S=' + to_native(base64.b64encode(value))


def unscramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    if not value.startswith(u'=S='):
        raise ValueError('Value does not start with indicator')
    value = base64.b64decode(value[3:])
    if PY2:
        k = ord(key[0])
        value = b''.join([chr(k ^ ord(b)) for b in value])
    else:
        k = key[0]
        value = bytes([k ^ b for b in value])
    return to_text(value)
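# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition, not part of the original commit).
# This is one-byte XOR obfuscation to keep values out of casual log output,
# NOT encryption. Assuming the module is importable as `_scramble`, the
# round trip follows directly from the definitions above:
#
#     from ansible_collections.community.docker.plugins.module_utils import _scramble
#
#     key = _scramble.generate_insecure_key()
#     scrambled = _scramble.scramble(u'top secret', key)
#     assert scrambled.startswith('=S=')
#     assert _scramble.unscramble(scrambled, key) == u'top secret'
# ---------------------------------------------------------------------------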
@ -0,0 +1,700 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import abc
import os
import platform
import re
import sys
import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
HAS_DOCKER_TRACEBACK = None

try:
    from requests.exceptions import SSLError
    from docker import __version__ as docker_version
    from docker.errors import APIError, NotFound, TLSParameterError
    from docker.tls import TLSConfig
    from docker import auth

    if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
        HAS_DOCKER_PY_3 = True
        from docker import APIClient as Client
    elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
        HAS_DOCKER_PY_2 = True
        from docker import APIClient as Client
    else:
        from docker import Client

except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)
    HAS_DOCKER_TRACEBACK = traceback.format_exc()
    HAS_DOCKER_PY = False


# The next two imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
    # docker (Docker SDK for Python >= 2.0.0)
    import docker.models  # noqa: F401, pylint: disable=unused-import
    HAS_DOCKER_MODELS = True
except ImportError:
    HAS_DOCKER_MODELS = False

try:
    # docker-py (Docker SDK for Python < 2.0.0)
    import docker.ssladapter  # noqa: F401, pylint: disable=unused-import
    HAS_DOCKER_SSLADAPTER = True
except ImportError:
    HAS_DOCKER_SSLADAPTER = False


try:
    from requests.exceptions import RequestException  # noqa: F401, pylint: disable=unused-import
except ImportError:
    # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either,
    # or Docker SDK for Python's dependency requests is missing. In any case, define an exception
    # class RequestException so that our code does not break.
    class RequestException(Exception):
        pass

from ansible_collections.community.docker.plugins.module_utils.util import (  # noqa: F401, pylint: disable=unused-import
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DEFAULT_TLS_HOSTNAME,  # TODO: remove
    DEFAULT_TIMEOUT_SECONDS,
    DOCKER_COMMON_ARGS,
    DOCKER_COMMON_ARGS_VARS,  # TODO: remove
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    DEFAULT_DOCKER_REGISTRY,  # TODO: remove
    BYTE_SUFFIXES,  # TODO: remove
    is_image_name_id,  # TODO: remove
    is_valid_tag,  # TODO: remove
    sanitize_result,
    DockerBaseClass,  # TODO: remove
    update_tls_hostname,
    compare_dict_allow_more_present,  # TODO: remove
    compare_generic,  # TODO: remove
    DifferenceTracker,  # TODO: remove
    clean_dict_booleans_for_docker_api,  # TODO: remove
    convert_duration_to_nanosecond,  # TODO: remove
    parse_healthcheck,  # TODO: remove
    omit_none_from_dict,  # TODO: remove
)


MIN_DOCKER_VERSION = "1.8.0"


if not HAS_DOCKER_PY:
    docker_version = None

    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
    class Client(object):  # noqa: F811
        def __init__(self, **kwargs):
            pass

    class APIError(Exception):  # noqa: F811
        pass

    class NotFound(Exception):  # noqa: F811
        pass


def _get_tls_config(fail_function, **kwargs):
    if 'assert_hostname' in kwargs and LooseVersion(docker_version) >= LooseVersion('7.0.0b1'):
        assert_hostname = kwargs.pop('assert_hostname')
        if assert_hostname is not None:
            fail_function(
                "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using"
                " Docker SDK for Python {docker_py_version}. The tls_hostname option (value: {tls_hostname})"
                " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME."
                " Make sure it is not set, or switch to an older version of Docker SDK for Python.".format(
                    docker_py_version=docker_version,
                    tls_hostname=assert_hostname,
                )
            )
    # Filter out all None parameters
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    try:
        tls_config = TLSConfig(**kwargs)
        return tls_config
    except TLSParameterError as exc:
        fail_function("TLS config error: %s" % exc)


def is_using_tls(auth):
    return auth['tls_verify'] or auth['tls']


def get_connect_params(auth, fail_function):
    if is_using_tls(auth):
        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

    result = dict(
        base_url=auth['docker_host'],
        version=auth['api_version'],
        timeout=auth['timeout'],
    )

    if auth['tls_verify']:
        # TLS with verification
        tls_config = dict(
            verify=True,
            assert_hostname=auth['tls_hostname'],
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
        if auth['cacert_path']:
            tls_config['ca_cert'] = auth['cacert_path']
        result['tls'] = _get_tls_config(**tls_config)
    elif auth['tls']:
        # TLS without verification
        tls_config = dict(
            verify=False,
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
        result['tls'] = _get_tls_config(**tls_config)

    if auth.get('use_ssh_client'):
        if LooseVersion(docker_version) < LooseVersion('4.4.0'):
            fail_function("use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer")
        result['use_ssh_client'] = True

    # No TLS
    return result
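# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original commit).
# With a hypothetical auth dict as below, get_connect_params() rewrites the
# scheme and wraps the TLS options in a TLSConfig for the SDK client:
#
#     auth = {
#         'docker_host': 'tcp://docker.example.com:2376', 'tls': False,
#         'tls_verify': True, 'tls_hostname': 'docker.example.com',
#         'cert_path': None, 'key_path': None, 'cacert_path': None,
#         'api_version': 'auto', 'timeout': 60, 'use_ssh_client': False,
#     }
#     params = get_connect_params(auth, fail_function=lambda msg: None)
#     # params['base_url'] == 'https://docker.example.com:2376'
#     # params['tls'] is a TLSConfig with verify=True (on Docker SDK for
#     # Python < 7.0.0 it also carries assert_hostname='docker.example.com')
# ---------------------------------------------------------------------------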
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = "Use `pip install --upgrade docker-py` to upgrade."


class AnsibleDockerClientBase(Client):
    def __init__(self, min_docker_version=None, min_docker_api_version=None):
        if min_docker_version is None:
            min_docker_version = MIN_DOCKER_VERSION
        NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))

        self.docker_py_version = LooseVersion(docker_version)

        if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
            self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
                      "SDK for Python) installed together as they use the same namespace and cause a corrupt "
                      "installation. Please uninstall both packages, and re-install only the docker-py or docker "
                      "python module (for %s's Python %s). It is recommended to install the docker module. Please "
                      "note that simply uninstalling one of the modules can leave the other module in a broken "
                      "state." % (platform.node(), sys.executable))

        if not HAS_DOCKER_PY:
            msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0 (Python >= 3.6) or "
                                       "docker<5.0.0 (Python 2.7)")
            msg = msg + ", for example via `pip install docker` (Python >= 3.6) or " \
                + "`pip install docker==4.4.4` (Python 2.7). The error was: %s"
            self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s. "
            if not NEEDS_DOCKER_PY2:
                # The minimal required version is < 2.0 (and the current version as well).
                # Advertise docker (instead of docker-py).
                msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
            elif docker_version < LooseVersion('2.0'):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))

        self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)

        try:
            super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or '1.25'
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg, **kwargs):
        pass

    def deprecate(self, msg, version=None, date=None, collection_name=None):
        pass

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value, type='str'):
        if param_value is not None:
            # take module parameter value
            if type == 'bool':
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if type == 'int':
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if type == 'bool':
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if type == 'int':
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self):
        pass

    @property
    def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log('Getting credentials')

        client_params = self._get_params()

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
                                        DEFAULT_DOCKER_HOST, type='str'),
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', None, type='str'),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        'auto', type='str'),
            cacert_path=self._get_value('cacert_path', params['ca_path'], 'DOCKER_CERT_PATH', None, type='str'),
            cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
            key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
            tls_verify=self._get_value('validate_certs', params['validate_certs'], 'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY, type='bool'),
            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
                                    DEFAULT_TIMEOUT_SECONDS, type='int'),
            use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
        )

        update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error):
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            self.fail("You asked for verification that the Docker daemon certificate's hostname matches %s. "
                      "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
                      "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
                      "setting the `tls` parameter to true."
                      % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
        self.fail("SSL Exception: %s" % (error))

    def get_container_by_id(self, container_id):
        try:
            self.log("Inspecting container Id %s" % container_id)
            result = self.inspect_container(container=container_id)
            self.log("Completed container inspection")
            return result
        except NotFound as dummy:
            return None
        except Exception as exc:
            self.fail("Error inspecting container: %s" % exc)

    def get_container(self, name=None):
        '''
        Lookup a container and return the inspection results.
        '''
        if name is None:
            return None

        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'], list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is None:
            return None

        return self.get_container_by_id(result['Id'])

    def get_network(self, name=None, network_id=None):
        '''
        Lookup a network and return the inspection results.
        '''
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                for network in self.networks():
                    self.log("testing network: %s" % (network['Name']))
                    if name == network['Name']:
                        result = network
                        break
                    if network['Id'].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:
                self.fail("Error retrieving network list: %s" % exc)

        if result is not None:
            network_id = result['Id']

        if network_id is not None:
            try:
                self.log("Inspecting network Id %s" % network_id)
                result = self.inspect_network(network_id)
                self.log("Completed network inspection")
            except NotFound as dummy:
                return None
            except Exception as exc:
                self.fail("Error inspecting network: %s" % exc)

        return result

    def find_image(self, name, tag):
        '''
        Lookup an image (by name and tag) and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if not images:
            # With API <= 1.20, images pulled from Docker Hub may be named 'docker.io/<name>'
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log("Check for docker.io image: %s" % repo_name)
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith('library/'):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len('library/'):]
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = "%s/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images and '/' not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = "%s/library/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Daemon returned more than one result for %s:%s" % (name, tag))

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except NotFound:
                self.log("Image %s:%s not found." % (name, tag))
                return None
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
            return inspection

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def find_image_by_id(self, image_id, accept_missing_image=False):
        '''
        Lookup an image (by ID) and return the inspection results.
        '''
        if not image_id:
            return None

        self.log("Find image %s (by ID)" % image_id)
        try:
            inspection = self.inspect_image(image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
            self.log("Image %s not found." % image_id)
            return None
        except Exception as exc:
            self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
        return inspection

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        '''
        try:
            response = self.images(name=name)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            lookup_digest = "%s@%s" % (name, tag)
            images = []
            for image in response:
                tags = image.get('RepoTags')
                digests = image.get('RepoDigests')
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def pull_image(self, name, tag="latest", platform=None):
        '''
        Pull an image
        '''
        kwargs = dict(
            tag=tag,
            stream=True,
            decode=True,
        )
        if platform is not None:
            kwargs['platform'] = platform
        self.log("Pulling image %s:%s" % (name, tag))
        old_tag = self.find_image(name, tag)
        try:
            for line in self.pull(name, **kwargs):
                self.log(line, pretty_print=True)
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" % (name,
                                                                               error_detail.get('code'),
                                                                               error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" % (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        new_tag = self.find_image(name, tag)

        return new_tag, old_tag == new_tag

    def inspect_distribution(self, image, **kwargs):
        '''
        Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
        since prior versions did not support accessing private repositories.
        '''
        if self.docker_py_version < LooseVersion('4.0.0'):
            registry = auth.resolve_repository_name(image)[0]
            header = auth.get_config_header(self, registry)
            if header:
                return self._result(self._get(
                    self._url('/distribution/{0}/json', image),
                    headers={'X-Registry-Auth': header}
                ), json=True)
        return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)
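# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original commit).
# A module would normally use the AnsibleDockerClient subclass defined below,
# which supplies fail()/_get_params() on top of this base class, e.g.:
#
#     client = AnsibleDockerClient(
#         argument_spec=dict(name=dict(type='str', required=True)),
#         supports_check_mode=True,
#         min_docker_version='2.0.0',
#     )
#     container = client.get_container(client.module.params['name'])
#     client.module.exit_json(changed=False, exists=container is not None)
# ---------------------------------------------------------------------------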
class AnsibleDockerClient(AnsibleDockerClientBase):

    def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
                 required_together=None, required_if=None, required_one_of=None, required_by=None,
                 min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None,
                 option_minimal_versions_ignore_params=None, fail_results=None):

        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode

        super(AnsibleDockerClient, self).__init__(
            min_docker_version=min_docker_version,
            min_docker_api_version=min_docker_api_version)

        if option_minimal_versions is not None:
            self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)

    def fail(self, msg, **kwargs):
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(self, msg, version=None, date=None, collection_name=None):
        self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)

    def _get_params(self):
        return self.module.params

    def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
        self.option_minimal_versions = dict()
        for option in self.module.argument_spec:
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = dict()
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if 'docker_py_version' in data:
                support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
            if 'docker_api_version' in data:
                support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
            data['supported'] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data['supported']:
                # Test whether option is specified
                if 'detect_usage' in data:
                    used = data['detect_usage'](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and 'default' in self.module.argument_spec[option]:
                        used = self.module.params[option] != self.module.argument_spec[option]['default']
                if used:
                    # If the option is used, compose error message.
                    if 'usage_msg' in data:
                        usg = data['usage_msg']
                    else:
                        usg = 'set %s option' % (option, )
                    if not support_docker_api:
                        msg = 'Docker API version is %s. Minimum version required is %s to %s.'
                        msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
                    elif not support_docker_py:
                        msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
                        if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
                        elif self.docker_py_version < LooseVersion('2.0.0'):
                            msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
                        else:
                            msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
                        msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
                    else:
                        # should not happen
                        msg = 'Cannot %s with your configuration.' % (usg, )
                    self.fail(msg)

    def report_warnings(self, result, warnings_key=None):
        '''
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        '''
        if warnings_key is None:
            warnings_key = ['Warnings']
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        # Check strings first: a string is also a Sequence, and iterating it
        # in the branch below would report every character as its own warning.
        if isinstance(result, string_types):
            if result:
                self.module.warn('Docker warning: {0}'.format(result))
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn('Docker warning: {0}'.format(warning))
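# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original commit).
# Given the docstring above, a hypothetical create-network response would
# surface as a single Ansible warning:
#
#     result = {'Id': 'abc123', 'Warnings': ['failed to configure MTU']}
#     client.report_warnings(result)
#     # -> warns 'Docker warning: failed to configure MTU'
# ---------------------------------------------------------------------------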
@ -0,0 +1,584 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import abc
import os
import re

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

try:
    from requests.exceptions import RequestException, SSLError  # noqa: F401, pylint: disable=unused-import
except ImportError:
    # Define an exception class RequestException so that our code does not break.
    class RequestException(Exception):
        pass

from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient as Client
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    NotFound,
    MissingRequirementException,
    TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
    parse_repository_tag,
)

from ansible_collections.community.docker.plugins.module_utils.util import (  # noqa: F401, pylint: disable=unused-import
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DEFAULT_TLS_HOSTNAME,  # TODO: remove
    DEFAULT_TIMEOUT_SECONDS,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    DEFAULT_DOCKER_REGISTRY,  # TODO: remove
    is_image_name_id,  # TODO: remove
    is_valid_tag,  # TODO: remove
    sanitize_result,
    update_tls_hostname,
)


def _get_tls_config(fail_function, **kwargs):
    try:
        tls_config = TLSConfig(**kwargs)
        return tls_config
    except TLSParameterError as exc:
        fail_function("TLS config error: %s" % exc)


def is_using_tls(auth_data):
    return auth_data['tls_verify'] or auth_data['tls']


def get_connect_params(auth_data, fail_function):
    if is_using_tls(auth_data):
        auth_data['docker_host'] = auth_data['docker_host'].replace('tcp://', 'https://')

    result = dict(
        base_url=auth_data['docker_host'],
        version=auth_data['api_version'],
        timeout=auth_data['timeout'],
    )

    if auth_data['tls_verify']:
        # TLS with verification
        tls_config = dict(
            verify=True,
            assert_hostname=auth_data['tls_hostname'],
            fail_function=fail_function,
        )
        if auth_data['cert_path'] and auth_data['key_path']:
            tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
        if auth_data['cacert_path']:
            tls_config['ca_cert'] = auth_data['cacert_path']
        result['tls'] = _get_tls_config(**tls_config)
    elif auth_data['tls']:
        # TLS without verification
        tls_config = dict(
            verify=False,
            fail_function=fail_function,
        )
        if auth_data['cert_path'] and auth_data['key_path']:
            tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
        result['tls'] = _get_tls_config(**tls_config)

    if auth_data.get('use_ssh_client'):
        result['use_ssh_client'] = True

    # No TLS
    return result


class AnsibleDockerClientBase(Client):
    def __init__(self, min_docker_api_version=None):
        self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)

        try:
            super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except MissingRequirementException as exc:
            self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception)
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or '1.25'
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg, **kwargs):
        pass

    def deprecate(self, msg, version=None, date=None, collection_name=None):
        pass

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value, type='str'):
        if param_value is not None:
            # take module parameter value
            if type == 'bool':
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if type == 'int':
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if type == 'bool':
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if type == 'int':
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self):
        pass

    @property
    def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log('Getting credentials')

        client_params = self._get_params()

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
                                        DEFAULT_DOCKER_HOST, type='str'),
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', None, type='str'),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        'auto', type='str'),
            cacert_path=self._get_value('cacert_path', params['ca_path'], 'DOCKER_CERT_PATH', None, type='str'),
            cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
            key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
            tls_verify=self._get_value('validate_certs', params['validate_certs'], 'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY, type='bool'),
            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
                                    DEFAULT_TIMEOUT_SECONDS, type='int'),
            use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
        )

        def depr(*args, **kwargs):
            self.deprecate(*args, **kwargs)

        update_tls_hostname(result, old_behavior=True, deprecate_function=depr, uses_tls=is_using_tls(result))

        return result

    def _handle_ssl_error(self, error):
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            self.fail("You asked for verification that the Docker daemon certificate's hostname matches %s. "
                      "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
                      "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
                      "setting the `tls` parameter to true."
                      % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
        self.fail("SSL Exception: %s" % (error))

    def get_container_by_id(self, container_id):
        try:
            self.log("Inspecting container Id %s" % container_id)
            result = self.get_json('/containers/{0}/json', container_id)
            self.log("Completed container inspection")
            return result
        except NotFound as dummy:
            return None
        except Exception as exc:
            self.fail("Error inspecting container: %s" % exc)

    def get_container(self, name=None):
        '''
        Lookup a container and return the inspection results.
        '''
        if name is None:
            return None

        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            params = {
                'limit': -1,
                'all': 1,
                'size': 0,
                'trunc_cmd': 0,
            }
            containers = self.get_json("/containers/json", params=params)
            for container in containers:
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'], list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is None:
            return None

        return self.get_container_by_id(result['Id'])

    def get_network(self, name=None, network_id=None):
        '''
        Lookup a network and return the inspection results.
        '''
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                networks = self.get_json("/networks")
                for network in networks:
                    self.log("testing network: %s" % (network['Name']))
                    if name == network['Name']:
                        result = network
                        break
                    if network['Id'].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:
                self.fail("Error retrieving network list: %s" % exc)

        if result is not None:
            network_id = result['Id']

        if network_id is not None:
            try:
                self.log("Inspecting network Id %s" % network_id)
                result = self.get_json('/networks/{0}', network_id)
                self.log("Completed network inspection")
            except NotFound as dummy:
                return None
            except Exception as exc:
                self.fail("Error inspecting network: %s" % exc)

        return result

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        '''
        try:
            params = {
                'only_ids': 0,
                'all': 0,
            }
            if LooseVersion(self.api_version) < LooseVersion('1.25'):
                # only use "filter" on API 1.24 and under, as it is deprecated
                params['filter'] = name
            else:
                params['filters'] = convert_filters({'reference': name})
            images = self.get_json("/images/json", params=params)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        if tag:
            lookup = "%s:%s" % (name, tag)
            lookup_digest = "%s@%s" % (name, tag)
            response = images
            images = []
            for image in response:
                tags = image.get('RepoTags')
                digests = image.get('RepoDigests')
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def find_image(self, name, tag):
        '''
        Lookup an image (by name and tag) and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if not images:
            # With API <= 1.20, images pulled from Docker Hub may be named 'docker.io/<name>'
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log("Check for docker.io image: %s" % repo_name)
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith('library/'):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len('library/'):]
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = "%s/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images and '/' not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = "%s/library/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Daemon returned more than one result for %s:%s" % (name, tag))

        if len(images) == 1:
            try:
                return self.get_json('/images/{0}/json', images[0]['Id'])
            except NotFound:
                self.log("Image %s:%s not found." % (name, tag))
                return None
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def find_image_by_id(self, image_id, accept_missing_image=False):
        '''
        Lookup an image (by ID) and return the inspection results.
        '''
        if not image_id:
            return None

        self.log("Find image %s (by ID)" % image_id)
        try:
            return self.get_json('/images/{0}/json', image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
            self.log("Image %s not found." % image_id)
            return None
        except Exception as exc:
            self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))

    def pull_image(self, name, tag="latest", platform=None):
        '''
        Pull an image
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        old_tag = self.find_image(name, tag)
        try:
            repository, image_tag = parse_repository_tag(name)
            registry, repo_name = auth.resolve_repository_name(repository)
            params = {
                'tag': tag or image_tag or 'latest',
                'fromImage': repository,
            }
            if platform is not None:
                params['platform'] = platform

            headers = {}
            header = auth.get_config_header(self, registry)
            if header:
                headers['X-Registry-Auth'] = header

            response = self._post(
                self._url('/images/create'), params=params, headers=headers,
                stream=True, timeout=None
            )
            self._raise_for_status(response)
            for line in self._stream_helper(response, decode=True):
                self.log(line, pretty_print=True)
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" % (name,
                                                                               error_detail.get('code'),
                                                                               error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" % (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        new_tag = self.find_image(name, tag)

        return new_tag, old_tag == new_tag
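# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original commit).
# pull_image() streams /images/create and re-inspects the image afterwards,
# so callers can derive a changed flag; e.g. inside a module (image name,
# tag and platform are made up):
#
#     image, unchanged = client.pull_image('nginx', tag='1.25', platform='linux/amd64')
#     # `image` is the inspection dict of the now-present image; `unchanged`
#     # is True when the local image was already up to date.
# ---------------------------------------------------------------------------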
class AnsibleDockerClient(AnsibleDockerClientBase):

    def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
                 required_together=None, required_if=None, required_one_of=None, required_by=None,
                 min_docker_api_version=None, option_minimal_versions=None,
                 option_minimal_versions_ignore_params=None, fail_results=None):

        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode

        super(AnsibleDockerClient, self).__init__(min_docker_api_version=min_docker_api_version)

        if option_minimal_versions is not None:
            self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)

    def fail(self, msg, **kwargs):
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(self, msg, version=None, date=None, collection_name=None):
        self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)

    def _get_params(self):
        return self.module.params

    def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
        self.option_minimal_versions = dict()
        for option in self.module.argument_spec:
            if ignore_params is not None:
                if option in ignore_params:
                    continue
            self.option_minimal_versions[option] = dict()
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_api = True
            if 'docker_api_version' in data:
                support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
            data['supported'] = support_docker_api
            # Fail if option is not supported but used
            if not data['supported']:
                # Test whether option is specified
                if 'detect_usage' in data:
                    used = data['detect_usage'](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and 'default' in self.module.argument_spec[option]:
                        used = self.module.params[option] != self.module.argument_spec[option]['default']
                if used:
                    # If the option is used, compose error message.
                    if 'usage_msg' in data:
                        usg = data['usage_msg']
                    else:
                        usg = 'set %s option' % (option, )
                    if not support_docker_api:
                        msg = 'Docker API version is %s. Minimum version required is %s to %s.'
                        msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
                    else:
                        # should not happen
                        msg = 'Cannot %s with your configuration.' % (usg, )
                    self.fail(msg)

    def report_warnings(self, result, warnings_key=None):
        '''
        Checks result of client operation for warnings, and if present, outputs them.
|
||||
|
||||
warnings_key should be a list of keys used to crawl the result dictionary.
|
||||
For example, if warnings_key == ['a', 'b'], the function will consider
|
||||
result['a']['b'] if these keys exist. If the result is a non-empty string, it
|
||||
will be reported as a warning. If the result is a list, every entry will be
|
||||
reported as a warning.
|
||||
|
||||
In most cases (if warnings are returned at all), warnings_key should be
|
||||
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
|
||||
'''
|
||||
if warnings_key is None:
|
||||
warnings_key = ['Warnings']
|
||||
for key in warnings_key:
|
||||
if not isinstance(result, Mapping):
|
||||
return
|
||||
result = result.get(key)
|
||||
if isinstance(result, Sequence):
|
||||
for warning in result:
|
||||
self.module.warn('Docker warning: {0}'.format(warning))
|
||||
elif isinstance(result, string_types) and result:
|
||||
self.module.warn('Docker warning: {0}'.format(result))
|
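
    # Illustrative usage sketch (not part of the module; the result structure is
    # hypothetical): report_warnings() crawls the result with warnings_key and
    # emits one Ansible warning per entry found:
    #
    #     result = {'Status': {'Warnings': ['low disk space', 'node is degraded']}}
    #     client.report_warnings(result, warnings_key=['Status', 'Warnings'])
    #     # -> module.warn('Docker warning: low disk space')
    #     # -> module.warn('Docker warning: node is degraded')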
@ -0,0 +1,357 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import abc
import json
import shlex

from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

from ansible_collections.community.docker.plugins.module_utils._api.auth import resolve_repository_name

from ansible_collections.community.docker.plugins.module_utils.util import (  # noqa: F401, pylint: disable=unused-import
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
)


DOCKER_COMMON_ARGS = dict(
    docker_cli=dict(type='path'),
    docker_host=dict(type='str', fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
    tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
    api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
    ca_path=dict(type='path', aliases=['ca_cert', 'tls_ca_cert', 'cacert_path']),
    client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
    client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
    tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
    validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
    # debug=dict(type='bool', default=False),
    cli_context=dict(type='str'),
)
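
# Illustrative sketch (values are hypothetical, not taken from this file): the
# options above become global docker CLI flags in AnsibleDockerClientBase below,
# roughly as follows:
#
#     {'docker_host': 'tcp://192.0.2.1:2376', 'tls': True, 'ca_path': '/certs/ca.pem'}
#     -> ['docker', '--host', 'tcp://192.0.2.1:2376', '--tls', '--tlscacert', '/certs/ca.pem']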


class DockerException(Exception):
    pass


class AnsibleDockerClientBase(object):
    def __init__(self, common_args, min_docker_api_version=None, needs_api_version=True):
        self._environment = {}
        if common_args['tls_hostname']:
            self._environment['DOCKER_TLS_HOSTNAME'] = common_args['tls_hostname']
        if common_args['api_version'] and common_args['api_version'] != 'auto':
            self._environment['DOCKER_API_VERSION'] = common_args['api_version']
        self._cli = common_args.get('docker_cli')
        if self._cli is None:
            try:
                self._cli = get_bin_path('docker')
            except ValueError:
                self.fail('Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter')

        self._cli_base = [self._cli]
        docker_host = common_args['docker_host']
        if not docker_host and not common_args['cli_context']:
            docker_host = DEFAULT_DOCKER_HOST
        if docker_host:
            self._cli_base.extend(['--host', docker_host])
        if common_args['validate_certs']:
            self._cli_base.append('--tlsverify')
        elif common_args['tls']:
            self._cli_base.append('--tls')
        if common_args['ca_path']:
            self._cli_base.extend(['--tlscacert', common_args['ca_path']])
        if common_args['client_cert']:
            self._cli_base.extend(['--tlscert', common_args['client_cert']])
        if common_args['client_key']:
            self._cli_base.extend(['--tlskey', common_args['client_key']])
        if common_args['cli_context']:
            self._cli_base.extend(['--context', common_args['cli_context']])

        # `--format json` was only added as a shorthand for `--format {{ json . }}` in Docker 23.0
        dummy, self._version, dummy = self.call_cli_json('version', '--format', '{{ json . }}', check_rc=True)
        self._info = None

        if needs_api_version:
            if not isinstance(self._version.get('Server'), dict) or not isinstance(self._version['Server'].get('ApiVersion'), string_types):
                self.fail('Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?')
            self.docker_api_version_str = to_native(self._version['Server']['ApiVersion'])
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
            min_docker_api_version = min_docker_api_version or '1.25'
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
        else:
            self.docker_api_version_str = None
            self.docker_api_version = None
            if min_docker_api_version is not None:
                self.fail('Internal error: cannot have needs_api_version=False with min_docker_api_version not None')

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    def get_cli(self):
        return self._cli

    def get_version_info(self):
        return self._version

    def _compose_cmd(self, args):
        return self._cli_base + list(args)

    def _compose_cmd_str(self, args):
        return ' '.join(shlex.quote(a) for a in self._compose_cmd(args))

    @abc.abstractmethod
    # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None):
    def call_cli(self, *args, **kwargs):
        # Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually...
        pass

    # def call_cli_json(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False):
    def call_cli_json(self, *args, **kwargs):
        warn_on_stderr = kwargs.pop('warn_on_stderr', False)
        rc, stdout, stderr = self.call_cli(*args, **kwargs)
        if warn_on_stderr and stderr:
            self.warn(to_native(stderr))
        try:
            data = json.loads(stdout)
        except Exception as exc:
            self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format(
                cmd=self._compose_cmd_str(args),
                exc=to_native(exc),
                stdout=to_native(stdout),
            ))
        return rc, data, stderr

    # def call_cli_json_stream(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False):
    def call_cli_json_stream(self, *args, **kwargs):
        warn_on_stderr = kwargs.pop('warn_on_stderr', False)
        rc, stdout, stderr = self.call_cli(*args, **kwargs)
        if warn_on_stderr and stderr:
            self.warn(to_native(stderr))
        result = []
        try:
            for line in stdout.splitlines():
                line = line.strip()
                if line.startswith(b'{'):
                    result.append(json.loads(line))
        except Exception as exc:
            self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format(
                cmd=self._compose_cmd_str(args),
                exc=to_native(exc),
                stdout=to_native(stdout),
            ))
        return rc, result, stderr

    @abc.abstractmethod
    def fail(self, msg, **kwargs):
        pass

    @abc.abstractmethod
    def warn(self, msg):
        pass

    @abc.abstractmethod
    def deprecate(self, msg, version=None, date=None, collection_name=None):
        pass

    def get_cli_info(self):
        if self._info is None:
            dummy, self._info, dummy = self.call_cli_json('info', '--format', '{{ json . }}', check_rc=True)
        return self._info

    def get_client_plugin_info(self, component):
        cli_info = self.get_cli_info()
        if not isinstance(cli_info.get('ClientInfo'), dict):
            self.fail('Cannot determine Docker client information. Are you maybe using podman instead of docker?')
        for plugin in cli_info['ClientInfo'].get('Plugins') or []:
            if plugin.get('Name') == component:
                return plugin
        return None

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        '''
        dummy, images, dummy = self.call_cli_json_stream(
            'image', 'ls', '--format', '{{ json . }}', '--no-trunc', '--filter', 'reference={0}'.format(name),
            check_rc=True,
        )
        if tag:
            lookup = "%s:%s" % (name, tag)
            lookup_digest = "%s@%s" % (name, tag)
            response = images
            images = []
            for image in response:
                if image.get('Tag') == tag or image.get('Digest') == tag:
                    images = [image]
                    break
        return images

    def find_image(self, name, tag):
        '''
        Lookup an image (by name and tag) and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = resolve_repository_name(name)
            if registry == 'docker.io':
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log("Check for docker.io image: %s" % repo_name)
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith('library/'):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len('library/'):]
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = "%s/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)
                if not images and '/' not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = "%s/library/%s" % (registry, repo_name)
                    self.log("Check for docker.io image: %s" % lookup)
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Daemon returned more than one result for %s:%s" % (name, tag))

        if len(images) == 1:
            rc, image, stderr = self.call_cli_json('image', 'inspect', images[0]['ID'])
            if not image:
                self.log("Image %s:%s not found." % (name, tag))
                return None
            if rc != 0:
                self.fail("Error inspecting image %s:%s - %s" % (name, tag, to_native(stderr)))
            return image[0]

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def find_image_by_id(self, image_id, accept_missing_image=False):
        '''
        Lookup an image (by ID) and return the inspection results.
        '''
        if not image_id:
            return None

        self.log("Find image %s (by ID)" % image_id)
        rc, image, stderr = self.call_cli_json('image', 'inspect', image_id)
        if not image:
            if not accept_missing_image:
                self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr)))
            self.log("Image %s not found." % image_id)
            return None
        if rc != 0:
            self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr)))
        return image[0]
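
    # Illustrative sketch (hypothetical call, not part of this class): a concrete
    # subclass that implements call_cli() can run any docker subcommand and get
    # parsed JSON back, e.g.
    #
    #     rc, data, stderr = client.call_cli_json('version', '--format', '{{ json . }}', check_rc=True)
    #     api_version = data.get('Server', {}).get('ApiVersion')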


class AnsibleModuleDockerClient(AnsibleDockerClientBase):
    def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
                 required_together=None, required_if=None, required_one_of=None, required_by=None,
                 min_docker_api_version=None, fail_results=None, needs_api_version=True):

        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = [('docker_host', 'cli_context')]
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = False  # self.module.params['debug']
        self.check_mode = self.module.check_mode
        self.diff = self.module._diff

        common_args = dict((k, self.module.params[k]) for k in DOCKER_COMMON_ARGS)
        super(AnsibleModuleDockerClient, self).__init__(
            common_args, min_docker_api_version=min_docker_api_version, needs_api_version=needs_api_version,
        )

    # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None):
    def call_cli(self, *args, **kwargs):
        # Python 2.7 does not like anything other than '**kwargs' after '*args', so we have to do this manually...
        check_rc = kwargs.pop('check_rc', False)
        data = kwargs.pop('data', None)
        cwd = kwargs.pop('cwd', None)
        environ_update = kwargs.pop('environ_update', None)
        if kwargs:
            raise TypeError("call_cli() got an unexpected keyword argument '%s'" % list(kwargs)[0])

        environment = self._environment.copy()
        if environ_update:
            environment.update(environ_update)
        rc, stdout, stderr = self.module.run_command(
            self._compose_cmd(args),
            binary_data=True,
            check_rc=check_rc,
            cwd=cwd,
            data=data,
            encoding=None,
            environ_update=environment,
            expand_user_and_vars=False,
            ignore_invalid_cwd=False,
        )
        return rc, stdout, stderr

    def fail(self, msg, **kwargs):
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def warn(self, msg):
        self.module.warn(msg)

    def deprecate(self, msg, version=None, date=None, collection_name=None):
        self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
@ -0,0 +1,873 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# Copyright (c) 2023, Léo El Amri (@lel-amri)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import json
import os
import re
import shutil
import tempfile
import traceback
from collections import namedtuple

from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import shlex_quote

from ansible_collections.community.docker.plugins.module_utils.util import DockerBaseClass
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
from ansible_collections.community.docker.plugins.module_utils._logfmt import (
    InvalidLogFmt as _InvalidLogFmt,
    parse_line as _parse_logfmt_line,
)

try:
    import yaml
    try:
        # use C version if possible for speedup
        from yaml import CSafeDumper as _SafeDumper
    except ImportError:
        from yaml import SafeDumper as _SafeDumper
    HAS_PYYAML = True
    PYYAML_IMPORT_ERROR = None
except ImportError:
    HAS_PYYAML = False
    PYYAML_IMPORT_ERROR = traceback.format_exc()


DOCKER_COMPOSE_FILES = ('compose.yaml', 'compose.yml', 'docker-compose.yaml', 'docker-compose.yml')

DOCKER_STATUS_DONE = frozenset((
    'Started',
    'Healthy',
    'Exited',
    'Restarted',
    'Running',
    'Created',
    'Stopped',
    'Killed',
    'Removed',
    # An extra, specific to containers
    'Recreated',
    # Extras for pull events
    'Pulled',
    # Extras for built events
    'Built',
))
DOCKER_STATUS_WORKING = frozenset((
    'Creating',
    'Starting',
    'Restarting',
    'Stopping',
    'Killing',
    'Removing',
    # An extra, specific to containers
    'Recreate',
    # Extras for pull events
    'Pulling',
    # Extras for build start events
    'Building',
))
DOCKER_STATUS_PULL = frozenset((
    'Pulled',
    'Pulling',
))
DOCKER_STATUS_BUILD = frozenset((
    'Built',
    'Building',
))
DOCKER_STATUS_ERROR = frozenset((
    'Error',
))
DOCKER_STATUS_WARNING = frozenset((
    'Warning',
))
DOCKER_STATUS_WAITING = frozenset((
    'Waiting',
))
DOCKER_STATUS = frozenset(DOCKER_STATUS_DONE | DOCKER_STATUS_WORKING | DOCKER_STATUS_PULL | DOCKER_STATUS_ERROR | DOCKER_STATUS_WAITING)
DOCKER_STATUS_AND_WARNING = frozenset(DOCKER_STATUS | DOCKER_STATUS_WARNING)

DOCKER_PULL_PROGRESS_DONE = frozenset((
    'Already exists',
    'Download complete',
    'Pull complete',
))
DOCKER_PULL_PROGRESS_WORKING = frozenset((
    'Pulling fs layer',
    'Waiting',
    'Downloading',
    'Verifying Checksum',
    'Extracting',
))


class ResourceType(object):
    UNKNOWN = "unknown"
    NETWORK = "network"
    IMAGE = "image"
    IMAGE_LAYER = "image-layer"
    VOLUME = "volume"
    CONTAINER = "container"
    SERVICE = "service"

    @classmethod
    def from_docker_compose_event(cls, resource_type):
        # type: (Type[ResourceType], Text) -> Any
        return {
            "Network": cls.NETWORK,
            "Image": cls.IMAGE,
            "Volume": cls.VOLUME,
            "Container": cls.CONTAINER,
            "Service": cls.SERVICE,
        }[resource_type]


Event = namedtuple(
    'Event',
    ['resource_type', 'resource_id', 'status', 'msg']
)
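
# Illustrative example (hypothetical values): a single parsed compose event.
# For the plain-progress line "Container foo-app-1  Started", _extract_event()
# below returns
#
#     Event(resource_type='container', resource_id='foo-app-1', status='Started', msg=None)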


_DRY_RUN_MARKER = 'DRY-RUN MODE -'

_RE_RESOURCE_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<resource_type>Network|Image|Volume|Container)'
    r'\s+'
    r'(?P<resource_id>\S+)'
    r'\s+'
    r'(?P<status>\S(?:|.*\S))'
    r'\s*'
    r'$'
)

_RE_PULL_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<service>\S+)'
    r'\s+'
    r'(?P<status>%s)'
    r'\s*'
    r'$'
    % '|'.join(re.escape(status) for status in DOCKER_STATUS_PULL)
)

_RE_PULL_PROGRESS = re.compile(
    r'^'
    r'\s*'
    r'(?P<layer>\S+)'
    r'\s+'
    r'(?P<status>%s)'
    r'\s*'
    r'(?:|\s\[[^]]+\]\s+\S+\s*|\s+[0-9.kKmMgGbB]+/[0-9.kKmMgGbB]+\s*)'
    r'$'
    % '|'.join(re.escape(status) for status in sorted(DOCKER_PULL_PROGRESS_DONE | DOCKER_PULL_PROGRESS_WORKING))
)

_RE_ERROR_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<resource_id>\S+)'
    r'\s+'
    r'(?P<status>%s)'
    r'\s*'
    r'(?P<msg>\S.*\S)?'
    r'$'
    % '|'.join(re.escape(status) for status in DOCKER_STATUS_ERROR)
)

_RE_WARNING_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<resource_id>\S+)'
    r'\s+'
    r'(?P<status>%s)'
    r'\s*'
    r'(?P<msg>\S.*\S)?'
    r'$'
    % '|'.join(re.escape(status) for status in DOCKER_STATUS_WARNING)
)

_RE_CONTINUE_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<resource_id>\S+)'
    r'\s+'
    r'-'
    r'\s*'
    r'(?P<msg>\S(?:|.*\S))'
    r'$'
)

_RE_SKIPPED_EVENT = re.compile(
    r'^'
    r'\s*'
    r'(?P<resource_id>\S+)'
    r'\s+'
    r'Skipped(?: -'
    r'\s*'
    r'(?P<msg>\S(?:|.*\S))|\s*)'
    r'$'
)

_RE_BUILD_START_EVENT = re.compile(
    r'^'
    r'\s*'
    r'build service'
    r'\s+'
    r'(?P<resource_id>\S+)'
    r'$'
)

_RE_BUILD_PROGRESS_EVENT = re.compile(
    r'^'
    r'\s*'
    r'==>'
    r'\s+'
    r'(?P<msg>.*)'
    r'$'
)

# The following needs to be kept in sync with the MINIMUM_VERSION compose_v2 docs fragment
MINIMUM_COMPOSE_VERSION = '2.18.0'


def _extract_event(line, warn_function=None):
    match = _RE_RESOURCE_EVENT.match(line)
    if match is not None:
        status = match.group('status')
        msg = None
        if status not in DOCKER_STATUS:
            status, msg = msg, status
        return Event(
            ResourceType.from_docker_compose_event(match.group('resource_type')),
            match.group('resource_id'),
            status,
            msg,
        ), True
    match = _RE_PULL_EVENT.match(line)
    if match:
        return Event(
            ResourceType.SERVICE,
            match.group('service'),
            match.group('status'),
            None,
        ), True
    match = _RE_ERROR_EVENT.match(line)
    if match:
        return Event(
            ResourceType.UNKNOWN,
            match.group('resource_id'),
            match.group('status'),
            match.group('msg') or None,
        ), True
    match = _RE_WARNING_EVENT.match(line)
    if match:
        if warn_function:
            if match.group('msg'):
                msg = '{rid}: {msg}'
            else:
                msg = 'Unspecified warning for {rid}'
            warn_function(msg.format(rid=match.group('resource_id'), msg=match.group('msg')))
        return None, True
    match = _RE_PULL_PROGRESS.match(line)
    if match:
        return Event(
            ResourceType.IMAGE_LAYER,
            match.group('layer'),
            match.group('status'),
            None,
        ), True
    match = _RE_SKIPPED_EVENT.match(line)
    if match:
        return Event(
            ResourceType.UNKNOWN,
            match.group('resource_id'),
            'Skipped',
            match.group('msg'),
        ), True
    match = _RE_BUILD_START_EVENT.match(line)
    if match:
        return Event(
            ResourceType.SERVICE,
            match.group('resource_id'),
            'Building',
            None,
        ), True
    return None, False
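
# Illustrative example (hypothetical input line): how _extract_event() classifies
# a pull-progress line against the regular expressions above:
#
#     _extract_event('0bd51f7dc5a9 Downloading [=>   ]  1.2MB/54MB')
#     # -> (Event('image-layer', '0bd51f7dc5a9', 'Downloading', None), True)
#     _extract_event('something compose never prints')
#     # -> (None, False), so the caller can try other parsers or warn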


def _extract_logfmt_event(line, warn_function=None):
    try:
        result = _parse_logfmt_line(line, logrus_mode=True)
    except _InvalidLogFmt:
        return None, False
    if 'time' not in result or 'level' not in result or 'msg' not in result:
        return None, False
    if result['level'] == 'warning':
        if warn_function:
            warn_function(result['msg'])
        return None, True
    # TODO: no idea what to do with this
    return None, False


def _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function):
    if warn_missing_dry_run_prefix and warn_function:
        # This could be a bug, a change of docker compose's output format, ...
        # Tell the user to report it to us :-)
        warn_function(
            'Event line is missing dry-run mode marker: {0!r}. Please report this at '
            'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md'
            .format(line)
        )


def _warn_unparsable_line(line, warn_function):
    # This could be a bug, a change of docker compose's output format, ...
    # Tell the user to report it to us :-)
    if warn_function:
        warn_function(
            'Cannot parse event from line: {0!r}. Please report this at '
            'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md'
            .format(line)
        )


def _find_last_event_for(events, resource_id):
    for index, event in enumerate(reversed(events)):
        if event.resource_id == resource_id:
            return len(events) - 1 - index, event
    return None


def _concat_event_msg(event, append_msg):
    return Event(
        event.resource_type,
        event.resource_id,
        event.status,
        '\n'.join(msg for msg in [event.msg, append_msg] if msg is not None),
    )


_JSON_LEVEL_TO_STATUS_MAP = {
    'warning': 'Warning',
    'error': 'Error',
}


def parse_json_events(stderr, warn_function=None):
    events = []
    stderr_lines = stderr.splitlines()
    if stderr_lines and stderr_lines[-1] == b'':
        del stderr_lines[-1]
    for line in stderr_lines:
        line = line.strip()
        if not line.startswith(b'{') or not line.endswith(b'}'):
            if line.startswith(b'Warning: '):
                # This is a bug in Compose that will get fixed by https://github.com/docker/compose/pull/11996
                event = Event(
                    ResourceType.UNKNOWN,
                    None,
                    'Warning',
                    to_native(line[len(b'Warning: '):]),
                )
                events.append(event)
                continue
            if warn_function:
                warn_function(
                    'Cannot parse event from non-JSON line: {0!r}. Please report this at '
                    'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md'
                    .format(line)
                )
            continue
        try:
            line_data = json.loads(line)
        except Exception as exc:
            if warn_function:
                warn_function(
                    'Cannot parse event from line: {0!r}: {1}. Please report this at '
                    'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md'
                    .format(line, exc)
                )
            continue
        if line_data.get('tail'):
            resource_type = ResourceType.UNKNOWN
            msg = line_data.get('text')
            status = 'Error'
            if isinstance(msg, str) and msg.lower().startswith('warning:'):
                # For some reason, Writer.TailMsgf() is always used for errors *except* in one place,
                # where its message is prepended with 'WARNING: ' (in pkg/compose/pull.go).
                status = 'Warning'
                msg = msg[len('warning:'):].lstrip()
            event = Event(
                resource_type,
                None,
                status,
                msg,
            )
        elif line_data.get('error'):
            resource_type = ResourceType.UNKNOWN
            event = Event(
                resource_type,
                line_data.get('id'),
                'Error',
                line_data.get('message'),
            )
        else:
            resource_type = ResourceType.UNKNOWN
            resource_id = line_data.get('id')
            status = line_data.get('status')
            text = line_data.get('text')
            if resource_id == " " and text and text.startswith("build service "):
                # Example:
                # {"dry-run":true,"id":" ","text":"build service app"}
                resource_id = "S" + text[len("build s"):]
                text = "Building"
            if resource_id == "==>" and text and text.startswith("==> writing image "):
                # Example:
                # {"dry-run":true,"id":"==>","text":"==> writing image dryRun-7d1043473d55bfa90e8530d35801d4e381bc69f0"}
                continue
            if resource_id == "==> ==>" and text and text.startswith("naming to "):
                # Example:
                # {"dry-run":true,"id":"==> ==>","text":"naming to display-app"}
                continue
            if isinstance(resource_id, str) and ' ' in resource_id:
                resource_type_str, resource_id = resource_id.split(' ', 1)
                try:
                    resource_type = ResourceType.from_docker_compose_event(resource_type_str)
                except KeyError:
                    if warn_function:
                        warn_function(
                            'Unknown resource type {0!r} in line {1!r}. Please report this at '
                            'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md'
                            .format(resource_type_str, line)
                        )
                    resource_type = ResourceType.UNKNOWN
            elif text in DOCKER_STATUS_PULL:
                resource_type = ResourceType.IMAGE
                status, text = text, status
            elif text in DOCKER_PULL_PROGRESS_DONE or line_data.get('text') in DOCKER_PULL_PROGRESS_WORKING:
                resource_type = ResourceType.IMAGE_LAYER
                status, text = text, status
            elif status is None and isinstance(text, string_types) and text.startswith('Skipped - '):
                status, text = text.split(' - ', 1)
            elif line_data.get('level') in _JSON_LEVEL_TO_STATUS_MAP and 'msg' in line_data:
                status = _JSON_LEVEL_TO_STATUS_MAP[line_data['level']]
                text = line_data['msg']
            if status not in DOCKER_STATUS_AND_WARNING and text in DOCKER_STATUS_AND_WARNING:
                status, text = text, status
            event = Event(
                resource_type,
                resource_id,
                status,
                text,
            )

        events.append(event)
    return events
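
# Illustrative example (hypothetical input): with `--progress json` each stderr
# line is a JSON object; parse_json_events() maps it onto an Event:
#
#     b'{"dry-run":false,"id":"Container foo-app-1","status":"Started"}'
#     # -> Event(resource_type='container', resource_id='foo-app-1',
#     #          status='Started', msg=None)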


def parse_events(stderr, dry_run=False, warn_function=None, nonzero_rc=False):
    events = []
    error_event = None
    stderr_lines = stderr.splitlines()
    if stderr_lines and stderr_lines[-1] == b'':
        del stderr_lines[-1]
    for index, line in enumerate(stderr_lines):
        line = to_native(line.strip())
        if not line:
            continue
        warn_missing_dry_run_prefix = False
        if dry_run:
            if line.startswith(_DRY_RUN_MARKER):
                line = line[len(_DRY_RUN_MARKER):].lstrip()
            else:
                warn_missing_dry_run_prefix = True
        event, parsed = _extract_event(line, warn_function=warn_function)
        if event is not None:
            events.append(event)
            if event.status in DOCKER_STATUS_ERROR:
                error_event = event
            else:
                error_event = None
            _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function)
            continue
        elif parsed:
            continue
        match = _RE_BUILD_PROGRESS_EVENT.match(line)
        if match:
            # Ignore this
            continue
        match = _RE_CONTINUE_EVENT.match(line)
        if match:
            # Continuing an existing event
            index_event = _find_last_event_for(events, match.group('resource_id'))
            if index_event is not None:
                index, event = index_event
                events[index] = _concat_event_msg(event, match.group('msg'))
                # Once the continuation has been merged into the existing event,
                # stop processing this line; otherwise it would be parsed again below.
                continue
        event, parsed = _extract_logfmt_event(line, warn_function=warn_function)
        if event is not None:
            events.append(event)
        elif parsed:
            continue
        if error_event is not None:
            # Unparsable line that apparently belongs to the previous error event
            events[-1] = _concat_event_msg(error_event, line)
            continue
        if line.startswith('Error '):
            # Error message that is independent of an error event
            error_event = Event(
                ResourceType.UNKNOWN,
                '',
                'Error',
                line,
            )
            events.append(error_event)
            continue
        if len(stderr_lines) == 1 or (nonzero_rc and index == len(stderr_lines) - 1):
            # **Very likely** an error message that is independent of an error event
            error_event = Event(
                ResourceType.UNKNOWN,
                '',
                'Error',
                line,
            )
            events.append(error_event)
            continue
        _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function)
        _warn_unparsable_line(line, warn_function)
    return events
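
# Illustrative example (hypothetical input): for older Compose releases that only
# emit plain-text progress, parse_events() turns stderr such as
#
#     b'DRY-RUN MODE -  Container foo-app-1  Started\n'
#
# (with dry_run=True) into [Event('container', 'foo-app-1', 'Started', None)],
# stripping the dry-run marker first.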


def has_changes(events, ignore_service_pull_events=False, ignore_build_events=False):
    for event in events:
        if event.status in DOCKER_STATUS_WORKING:
            if ignore_service_pull_events and event.status in DOCKER_STATUS_PULL:
                continue
            if ignore_build_events and event.status in DOCKER_STATUS_BUILD:
                continue
            return True
        if event.resource_type == ResourceType.IMAGE_LAYER and event.status in DOCKER_PULL_PROGRESS_WORKING:
            return True
    return False


def extract_actions(events):
    actions = []
    pull_actions = set()
    for event in events:
        if event.resource_type == ResourceType.IMAGE_LAYER and event.status in DOCKER_PULL_PROGRESS_WORKING:
            pull_id = (event.resource_id, event.status)
            if pull_id not in pull_actions:
                pull_actions.add(pull_id)
                actions.append({
                    'what': event.resource_type,
                    'id': event.resource_id,
                    'status': event.status,
                })
        if event.resource_type != ResourceType.IMAGE_LAYER and event.status in DOCKER_STATUS_WORKING:
            actions.append({
                'what': event.resource_type,
                'id': event.resource_id,
                'status': event.status,
            })
    return actions
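
# Illustrative example (hypothetical events): has_changes() and extract_actions()
# reduce an event stream to a changed flag and an action list:
#
#     events = [Event('container', 'foo-app-1', 'Creating', None)]
#     has_changes(events)       # -> True ('Creating' is in DOCKER_STATUS_WORKING)
#     extract_actions(events)   # -> [{'what': 'container', 'id': 'foo-app-1', 'status': 'Creating'}]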


def emit_warnings(events, warn_function):
    for event in events:
        # If a message is present, assume it is a warning
        if (event.status is None and event.msg is not None) or event.status in DOCKER_STATUS_WARNING:
            warn_function('Docker compose: {resource_type} {resource_id}: {msg}'.format(
                resource_type=event.resource_type,
                resource_id=event.resource_id,
                msg=event.msg,
            ))


def is_failed(events, rc):
    if rc:
        return True
    return False


def update_failed(result, events, args, stdout, stderr, rc, cli):
    if not rc:
        return False
    errors = []
    for event in events:
        if event.status in DOCKER_STATUS_ERROR:
            if event.resource_id is None:
                if event.resource_type == 'unknown':
                    msg = 'General error: '
                else:
                    msg = 'Error when processing {resource_type}: '
            elif event.resource_type == 'unknown':
                if event.resource_id == '':
                    msg = 'General error: '
                else:
                    msg = 'Error when processing {resource_id}: '
            else:
                msg = 'Error when processing {resource_type} {resource_id}: '
            msg += '{status}' if event.msg is None else '{msg}'
            errors.append(msg.format(
                resource_type=event.resource_type,
                resource_id=event.resource_id,
                status=event.status,
                msg=event.msg,
            ))
    if not errors:
        errors.append('Return code {code} is non-zero'.format(code=rc))
    result['failed'] = True
    result['msg'] = '\n'.join(errors)
    result['cmd'] = ' '.join(shlex_quote(arg) for arg in [cli] + args)
    result['stdout'] = to_native(stdout)
    result['stderr'] = to_native(stderr)
    result['rc'] = rc
    return True


def common_compose_argspec():
    return dict(
        project_src=dict(type='path'),
        project_name=dict(type='str'),
        files=dict(type='list', elements='path'),
        definition=dict(type='dict'),
        env_files=dict(type='list', elements='path'),
        profiles=dict(type='list', elements='str'),
        check_files_existing=dict(type='bool', default=True),
    )


def common_compose_argspec_ex():
    return dict(
        argspec=common_compose_argspec(),
        mutually_exclusive=[
            ('definition', 'project_src'),
            ('definition', 'files')
        ],
        required_one_of=[
            ('definition', 'project_src'),
        ],
        required_by={
            'definition': ('project_name', ),
        },
    )
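
# Illustrative usage sketch (hypothetical module, not part of this file): a
# compose module could merge these pieces into its client definition roughly
# like this:
#
#     argspec_ex = common_compose_argspec_ex()
#     argument_spec = dict(state=dict(type='str', default='present'))
#     argument_spec.update(argspec_ex.pop('argspec'))
#     client = AnsibleModuleDockerClient(argument_spec=argument_spec, **argspec_ex)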


def combine_binary_output(*outputs):
    return b'\n'.join(out for out in outputs if out)


def combine_text_output(*outputs):
    return '\n'.join(out for out in outputs if out)


class BaseComposeManager(DockerBaseClass):
    def __init__(self, client, min_version=MINIMUM_COMPOSE_VERSION):
        super(BaseComposeManager, self).__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        self.cleanup_dirs = set()
        parameters = self.client.module.params

        if parameters['definition'] is not None and not HAS_PYYAML:
            self.fail(
                missing_required_lib('PyYAML'),
                exception=PYYAML_IMPORT_ERROR
            )

        self.project_name = parameters['project_name']
        if parameters['definition'] is not None:
            self.project_src = tempfile.mkdtemp(prefix='ansible')
            self.cleanup_dirs.add(self.project_src)
            compose_file = os.path.join(self.project_src, 'compose.yaml')
            self.client.module.add_cleanup_file(compose_file)
            try:
                with open(compose_file, 'wb') as f:
                    yaml.dump(parameters['definition'], f, encoding="utf-8", Dumper=_SafeDumper)
            except Exception as exc:
                self.fail("Error writing to %s - %s" % (compose_file, to_native(exc)))
        else:
            self.project_src = os.path.abspath(parameters['project_src'])

        self.files = parameters['files']
        self.env_files = parameters['env_files']
        self.profiles = parameters['profiles']

        compose_version = self.get_compose_version()
        self.compose_version = LooseVersion(compose_version)
        if self.compose_version < LooseVersion(min_version):
            self.fail('Docker CLI {cli} has the compose plugin with version {version}; need version {min_version} or later'.format(
                cli=self.client.get_cli(),
                version=compose_version,
                min_version=min_version,
            ))

        if not os.path.isdir(self.project_src):
            self.fail('"{0}" is not a directory'.format(self.project_src))

        self.check_files_existing = parameters['check_files_existing']
        if self.files:
            for file in self.files:
                path = os.path.join(self.project_src, file)
                if not os.path.exists(path):
                    self.fail('Cannot find Compose file "{0}" relative to project directory "{1}"'.format(file, self.project_src))
        elif self.check_files_existing and all(not os.path.exists(os.path.join(self.project_src, f)) for f in DOCKER_COMPOSE_FILES):
            filenames = ', '.join(DOCKER_COMPOSE_FILES[:-1])
            self.fail('"{0}" does not contain {1}, or {2}'.format(self.project_src, filenames, DOCKER_COMPOSE_FILES[-1]))

        # Support for JSON output was added in Compose 2.29.0 (https://github.com/docker/compose/releases/tag/v2.29.0);
        # more precisely in https://github.com/docker/compose/pull/11478
        self.use_json_events = self.compose_version >= LooseVersion('2.29.0')

    def get_compose_version(self):
        return self.get_compose_version_from_cli() or self.get_compose_version_from_api()

    def get_compose_version_from_cli(self):
        rc, version_info, stderr = self.client.call_cli('compose', 'version', '--format', 'json')
        if rc:
            return None
        try:
            version = json.loads(version_info)['version']
            if version == 'dev':
                return None
            return version.lstrip('v')
        except Exception:
            return None

    def get_compose_version_from_api(self):
        compose = self.client.get_client_plugin_info('compose')
        if compose is None:
            self.fail('Docker CLI {0} does not have the compose plugin installed'.format(self.client.get_cli()))
        if compose['Version'] == 'dev':
            self.fail(
                'Docker CLI {0} has a compose plugin installed, but it reports version "dev".'
                ' Please use a version of the plugin that returns a proper version.'
                .format(self.client.get_cli())
            )
        return compose['Version'].lstrip('v')

    def fail(self, msg, **kwargs):
        self.cleanup()
        self.client.fail(msg, **kwargs)

    def get_base_args(self, plain_progress=False):
        args = ['compose', '--ansi', 'never']
        if self.use_json_events and not plain_progress:
            args.extend(['--progress', 'json'])
        elif self.compose_version >= LooseVersion('2.19.0'):
            # https://github.com/docker/compose/pull/10690
            args.extend(['--progress', 'plain'])
        args.extend(['--project-directory', self.project_src])
        if self.project_name:
            args.extend(['--project-name', self.project_name])
        for file in self.files or []:
            args.extend(['--file', file])
        for env_file in self.env_files or []:
            args.extend(['--env-file', env_file])
        for profile in self.profiles or []:
            args.extend(['--profile', profile])
        return args

    def _handle_failed_cli_call(self, args, rc, stdout, stderr):
        events = parse_json_events(stderr, warn_function=self.client.warn)
        result = {}
        self.update_failed(result, events, args, stdout, stderr, rc)
        self.client.module.exit_json(**result)

    def list_containers_raw(self):
        args = self.get_base_args() + ['ps', '--format', 'json', '--all']
        if self.compose_version >= LooseVersion('2.23.0'):
            # https://github.com/docker/compose/pull/11038
            args.append('--no-trunc')
        kwargs = dict(cwd=self.project_src, check_rc=not self.use_json_events)
        if self.compose_version >= LooseVersion('2.21.0'):
            # Breaking change in 2.21.0: https://github.com/docker/compose/pull/10918
            rc, containers, stderr = self.client.call_cli_json_stream(*args, **kwargs)
        else:
            rc, containers, stderr = self.client.call_cli_json(*args, **kwargs)
        if self.use_json_events and rc != 0:
            self._handle_failed_cli_call(args, rc, containers, stderr)
        return containers

    def list_containers(self):
        result = []
        for container in self.list_containers_raw():
            labels = {}
            if container.get('Labels'):
                for part in container['Labels'].split(','):
                    label_value = part.split('=', 1)
                    labels[label_value[0]] = label_value[1] if len(label_value) > 1 else ''
            container['Labels'] = labels
            container['Names'] = container.get('Names', container['Name']).split(',')
            container['Networks'] = container.get('Networks', '').split(',')
            container['Publishers'] = container.get('Publishers') or []
            result.append(container)
        return result

    def list_images(self):
        args = self.get_base_args() + ['images', '--format', 'json']
        kwargs = dict(cwd=self.project_src, check_rc=not self.use_json_events)
        rc, images, stderr = self.client.call_cli_json(*args, **kwargs)
        if self.use_json_events and rc != 0:
            self._handle_failed_cli_call(args, rc, images, stderr)
        return images

    def parse_events(self, stderr, dry_run=False, nonzero_rc=False):
        if self.use_json_events:
            return parse_json_events(stderr, warn_function=self.client.warn)
        return parse_events(stderr, dry_run=dry_run, warn_function=self.client.warn, nonzero_rc=nonzero_rc)

    def emit_warnings(self, events):
        emit_warnings(events, warn_function=self.client.warn)

    def update_result(
        self,
        result,
        events,
        stdout,
        stderr,
        ignore_service_pull_events=False,
        ignore_build_events=False,
    ):
        result['changed'] = result.get('changed', False) or has_changes(
            events,
            ignore_service_pull_events=ignore_service_pull_events,
            ignore_build_events=ignore_build_events,
        )
        result['actions'] = result.get('actions', []) + extract_actions(events)
        result['stdout'] = combine_text_output(result.get('stdout'), to_native(stdout))
        result['stderr'] = combine_text_output(result.get('stderr'), to_native(stderr))

    def update_failed(self, result, events, args, stdout, stderr, rc):
        return update_failed(
            result,
            events,
            args=args,
            stdout=stdout,
            stderr=stderr,
            rc=rc,
            cli=self.client.get_cli(),
        )

    def cleanup_result(self, result):
        if not result.get('failed'):
            # Only return stdout and stderr if they are not empty
            for res in ('stdout', 'stderr'):
                if result.get(res) == '':
                    result.pop(res)

    def cleanup(self):
        for dir in self.cleanup_dirs:
            try:
                shutil.rmtree(dir, True)
            except Exception:
                # should not happen, but simply ignore to be on the safe side
                pass
@ -0,0 +1,442 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile

from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import raise_from

from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound


class DockerFileCopyError(Exception):
    pass


class DockerUnexpectedError(DockerFileCopyError):
    pass


class DockerFileNotFound(DockerFileCopyError):
    pass


def _put_archive(client, container, path, data):
    # data can also be file object for streaming. This is because _put uses requests's put().
    # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
    url = client._url('/containers/{0}/archive', container)
    res = client._put(url, params={'path': path}, data=data)
    client._raise_for_status(res)
    return res.status_code == 200
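
# Illustrative sketch (hypothetical values): _put_archive() drives the engine's
# "extract archive to directory" endpoint, i.e. roughly
#
#     PUT /containers/{id}/archive?path=/tmp
#
# with a tar stream as the request body; the generators below produce that
# stream chunk by chunk so the file never has to be held in memory as one blob.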


def _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    if not stat.S_ISLNK(file_stat.st_mode):
        raise DockerUnexpectedError('stat information is not for a symlink')
    bio = io.BytesIO()
    with tarfile.open(fileobj=bio, mode='w|', dereference=False, encoding='utf-8') as tar:
        # Note that without both name (bytes) and arcname (unicode), this either fails for
        # Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
        # form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
        tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
        tarinfo.uid = user_id
        tarinfo.uname = ''
        if user_name:
            tarinfo.uname = user_name
        tarinfo.gid = group_id
        tarinfo.gname = ''
        tarinfo.mode &= 0o700
        if mode is not None:
            tarinfo.mode = mode
        if not tarinfo.issym():
            raise DockerUnexpectedError('stat information is not for a symlink')
        tar.addfile(tarinfo)
    return bio.getvalue()


def _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    yield _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode, user_name)


def _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
    if not stat.S_ISREG(file_stat.st_mode):
        raise DockerUnexpectedError('stat information is not for a regular file')
    tarinfo = tarfile.TarInfo()
    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
    tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = file_stat.st_size
    tarinfo.mtime = file_stat.st_mtime
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ''
    if user_name:
        tarinfo.uname = user_name

    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf

    size = tarinfo.size
    total_size += size
    with open(b_in_path, 'rb') as f:
        while size > 0:
            to_read = min(size, 65536)
            buf = f.read(to_read)
            if not buf:
                break
            size -= len(buf)
            yield buf
        if size:
            # If for some reason the file shrunk, fill up to the announced size with zeros.
            # (If it enlarged, ignore the remainder.)
            yield tarfile.NUL * size

        remainder = tarinfo.size % tarfile.BLOCKSIZE
        if remainder:
            # We need to write a multiple of 512 bytes. Fill up with zeros.
            yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
            total_size += tarfile.BLOCKSIZE - remainder

    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE

    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)


def _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=None):
    tarinfo = tarfile.TarInfo()
    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
    tarinfo.mode = mode
    tarinfo.uid = user_id
    tarinfo.gid = group_id
    tarinfo.size = len(content)
    try:
        tarinfo.mtime = int(datetime.datetime.now().timestamp())
    except AttributeError:
        # Python 2 (or more precisely: Python < 3.3) has no timestamp(). Use the following
        # expression for Python 2:
        tarinfo.mtime = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
    tarinfo.type = tarfile.REGTYPE
    tarinfo.linkname = ''
    if user_name:
        tarinfo.uname = user_name

    tarinfo_buf = tarinfo.tobuf()
    total_size = len(tarinfo_buf)
    yield tarinfo_buf

    total_size += len(content)
    yield content

    remainder = tarinfo.size % tarfile.BLOCKSIZE
    if remainder:
        # We need to write a multiple of 512 bytes. Fill up with zeros.
        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
        total_size += tarfile.BLOCKSIZE - remainder

    # End with two zeroed blocks
    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
    total_size += 2 * tarfile.BLOCKSIZE

    remainder = total_size % tarfile.RECORDSIZE
    if remainder > 0:
        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
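
# Illustrative size arithmetic (assumed values) for the padding logic above: a
# 700-byte payload occupies one 512-byte header block plus two 512-byte data
# blocks (700 rounded up), plus two zeroed end-of-archive blocks = 2560 bytes,
# which is then padded with zeros up to the next multiple of
# tarfile.RECORDSIZE (10240 bytes).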
|
||||
|
||||
|
||||
def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, user_name=None, follow_links=False):
    """Transfer a file from local to Docker container."""
    if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
        raise DockerFileNotFound(
            "file or module does not exist: %s" % to_native(in_path))

    b_in_path = to_bytes(in_path, errors='surrogate_or_strict')

    out_dir, out_file = os.path.split(out_path)

    if follow_links:
        file_stat = os.stat(b_in_path)
    else:
        file_stat = os.lstat(b_in_path)

    if stat.S_ISREG(file_stat.st_mode):
        stream = _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
    elif stat.S_ISLNK(file_stat.st_mode):
        stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
    else:
        raise DockerFileCopyError(
            'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format(
                ' referenced by' if follow_links else '', in_path, oct(file_stat.st_mode)))

    ok = _put_archive(client, container, out_dir, stream)
    if not ok:
        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))


def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None):
    """Transfer given content to a file in the Docker container."""
    out_dir, out_file = os.path.split(out_path)

    stream = _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=user_name)

    ok = _put_archive(client, container, out_dir, stream)
    if not ok:
        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))


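# Example usage of put_file_content (a sketch; assumes ``client`` is the Docker
# API client used elsewhere in this collection, that a container named
# 'my-container' exists, and that UID/GID 0 is wanted as the owner):
#
#     put_file_content(client, 'my-container', b'key=value\n', '/etc/app.conf',
#                      user_id=0, group_id=0, mode=0o644)
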
def stat_file(client, container, in_path, follow_links=False, log=None):
    """Fetch information on a file from a Docker container to local.

    Return a tuple ``(path, stat_data, link_target)`` where:

    :path: is the resolved path in case ``follow_links=True``;
    :stat_data: is ``None`` if the file does not exist, or a dictionary with fields
        ``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
        ``mtime`` (string), and ``linkTarget`` (string);
    :link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
        and a string with the symlink target otherwise.
    """
    considered_in_paths = set()

    while True:
        if in_path in considered_in_paths:
            raise DockerFileCopyError('Found infinite symbolic link loop when trying to stat "{0}"'.format(in_path))
        considered_in_paths.add(in_path)

        if log:
            log('FETCH: Stating "%s"' % in_path)

        response = client._head(
            client._url('/containers/{0}/archive', container),
            params={'path': in_path},
        )
        if response.status_code == 404:
            return in_path, None, None
        client._raise_for_status(response)
        header = response.headers.get('x-docker-container-path-stat')
        try:
            stat_data = json.loads(base64.b64decode(header))
        except Exception as exc:
            raise DockerUnexpectedError(
                'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}'
                .format(in_path=in_path, container=container, header=header, exc=exc)
            )

        # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
        if stat_data['mode'] & (1 << (32 - 5)) != 0:
            link_target = stat_data['linkTarget']
            if not follow_links:
                return in_path, stat_data, link_target
            in_path = os.path.join(os.path.split(in_path)[0], link_target)
            continue

        return in_path, stat_data, None


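# The daemon reports the stat data used above in the X-Docker-Container-Path-Stat
# response header as base64-encoded JSON. Decoded by hand (illustrative values):
#
#     stat_data = json.loads(base64.b64decode(header))
#     # {'name': 'app.conf', 'size': 10, 'mode': 420, 'mtime': '...', 'linkTarget': ''}
#     is_symlink = stat_data['mode'] & (1 << 27) != 0  # Go's fs.ModeSymlink bit
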
class _RawGeneratorFileobj(io.RawIOBase):
    def __init__(self, stream):
        self._stream = stream
        self._buf = b''

    def readable(self):
        return True

    def _readinto_from_buf(self, b, index, length):
        cpy = min(length - index, len(self._buf))
        if cpy:
            b[index:index + cpy] = self._buf[:cpy]
            self._buf = self._buf[cpy:]
            index += cpy
        return index

    def readinto(self, b):
        index = 0
        length = len(b)

        index = self._readinto_from_buf(b, index, length)
        if index == length:
            return index

        try:
            self._buf += next(self._stream)
        except StopIteration:
            return index

        return self._readinto_from_buf(b, index, length)


def _stream_generator_to_fileobj(stream):
    '''Given a generator that generates chunks of bytes, create a readable buffered stream.'''
    raw = _RawGeneratorFileobj(stream)
    return io.BufferedReader(raw)


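# Example (illustrative): any generator of byte chunks can be wrapped this way,
# which is what lets tarfile consume the streamed archive download below:
#
#     fileobj = _stream_generator_to_fileobj(iter([b'abc', b'def']))
#     fileobj.read(4)  # -> b'abcd'
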
def fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=False, log=None):
    """Fetch a file (as a tar file entry) from a Docker container to local."""
    considered_in_paths = set()

    while True:
        if in_path in considered_in_paths:
            raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
        considered_in_paths.add(in_path)

        if log:
            log('FETCH: Fetching "%s"' % in_path)
        try:
            stream = client.get_raw_stream(
                '/containers/{0}/archive', container,
                params={'path': in_path},
                headers={'Accept-Encoding': 'identity'},
            )
        except NotFound:
            return process_none(in_path)

        with tarfile.open(fileobj=_stream_generator_to_fileobj(stream), mode='r|') as tar:
            symlink_member = None
            result = None
            found = False
            for member in tar:
                if found:
                    raise DockerUnexpectedError('Received tarfile contains more than one file!')
                found = True
                if member.issym():
                    symlink_member = member
                    continue
                if member.isfile():
                    result = process_regular(in_path, tar, member)
                    continue
                result = process_other(in_path, member)
            if symlink_member:
                if not follow_links:
                    return process_symlink(in_path, symlink_member)
                in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
                if log:
                    log('FETCH: Following symbolic link to "%s"' % in_path)
                continue
            if found:
                return result
            raise DockerUnexpectedError('Received tarfile is empty!')


def fetch_file(client, container, in_path, out_path, follow_links=False, log=None):
    b_out_path = to_bytes(out_path, errors='surrogate_or_strict')

    def process_none(in_path):
        raise DockerFileNotFound(
            'File {in_path} does not exist in container {container}'
            .format(in_path=in_path, container=container)
        )

    def process_regular(in_path, tar, member):
        if not follow_links and os.path.exists(b_out_path):
            os.unlink(b_out_path)

        in_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        with open(b_out_path, 'wb') as out_f:
            shutil.copyfileobj(in_f, out_f)
        return in_path

    def process_symlink(in_path, member):
        if os.path.exists(b_out_path):
            os.unlink(b_out_path)

        os.symlink(member.linkname, b_out_path)
        return in_path

    def process_other(in_path, member):
        raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)

    return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)


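# Example usage of fetch_file (a sketch; same assumptions about ``client`` as above):
#
#     fetch_file(client, 'my-container', '/etc/hostname', '/tmp/hostname', follow_links=True)
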
def _execute_command(client, container, command, log=None, check_rc=False):
    if log:
        log('Executing {command} in {container}'.format(command=command, container=container))

    data = {
        'Container': container,
        'User': '',
        'Privileged': False,
        'Tty': False,
        'AttachStdin': False,
        'AttachStdout': True,
        'AttachStderr': True,
        'Cmd': command,
    }

    if 'detachKeys' in client._general_configs:
        data['detachKeys'] = client._general_configs['detachKeys']

    try:
        exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
    except NotFound as e:
        raise_from(
            DockerFileCopyError('Could not find container "{container}"'.format(container=container)),
            e,
        )
    except APIError as e:
        if e.response is not None and e.response.status_code == 409:
            raise_from(
                DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)),
                e,
            )
        raise
    exec_id = exec_data['Id']

    data = {
        'Tty': False,
        'Detach': False
    }
    stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, data=data, stream=False, demux=True, tty=False)

    result = client.get_json('/exec/{0}/json', exec_id)

    rc = result.get('ExitCode') or 0
    stdout = stdout or b''
    stderr = stderr or b''

    if log:
        log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr))

    if check_rc and rc != 0:
        raise DockerUnexpectedError(
            'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}'
            .format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr)
        )

    return rc, stdout, stderr


def determine_user_group(client, container, log=None):
    dummy, stdout, stderr = _execute_command(client, container, ['/bin/sh', '-c', 'id -u && id -g'], check_rc=True, log=log)

    stdout_lines = stdout.splitlines()
    if len(stdout_lines) != 2:
        raise DockerUnexpectedError(
            'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}'
            .format(container=container, lc=len(stdout_lines), stdout=stdout)
        )

    user_id, group_id = stdout_lines
    try:
        return int(user_id), int(group_id)
    except ValueError:
        raise DockerUnexpectedError(
            'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead'
            .format(container=container, l1=user_id, l2=group_id)
        )
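

# Example flow combining the helpers above (a sketch): determine the container's
# default user and group, then upload a file owned by them:
#
#     uid, gid = determine_user_group(client, 'my-container')
#     put_file(client, 'my-container', '/tmp/app.conf', '/etc/app.conf',
#              user_id=uid, group_id=gid, mode=0o644)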
@ -0,0 +1,194 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import os
import tarfile

from ansible.module_utils.common.text.converters import to_native


class ImageArchiveManifestSummary(object):
    '''
    Represents data extracted from a manifest.json found in the tar archive output of the
    "docker image save some:tag > some.tar" command.
    '''

    def __init__(self, image_id, repo_tags):
        '''
        :param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
        :type image_id: str
        :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
        :type repo_tags: list[str]
        '''

        self.image_id = image_id
        self.repo_tags = repo_tags


class ImageArchiveInvalidException(Exception):
    def __init__(self, message, cause):
        '''
        :param message: Exception message
        :type message: str
        :param cause: Inner exception that this exception wraps
        :type cause: Exception | None
        '''

        super(ImageArchiveInvalidException, self).__init__(message)

        # Python 2 does not support causes
        self.cause = cause


def api_image_id(archive_image_id):
    '''
    Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
    that represents the same image hash, but in the format presented by the Docker Engine API.

    :param archive_image_id: plain image hash
    :type archive_image_id: str

    :returns: Prefixed hash used by REST api
    :rtype: str
    '''

    return 'sha256:%s' % archive_image_id


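# Example (follows directly from the docstring above):
#
#     api_image_id('abcde12345')  # -> 'sha256:abcde12345'
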
def load_archived_image_manifest(archive_path):
    '''
    Attempts to get image IDs and image names from metadata stored in the image
    archive tar file.

    The tar should contain a file "manifest.json" with an array with one or more entries,
    and every entry should have a Config field with the image ID in its file name, as
    well as a RepoTags list, which typically has only one entry.

    :raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.

    :param archive_path: Tar file to read
    :type archive_path: str

    :return: None, if no file exists at archive_path, or a list of ImageArchiveManifestSummary objects.
    :rtype: list[ImageArchiveManifestSummary] | None
    '''

    try:
        # FileNotFoundError does not exist in Python 2
        if not os.path.isfile(archive_path):
            return None

        tf = tarfile.open(archive_path, 'r')
        try:
            try:
                ef = tf.extractfile('manifest.json')
                try:
                    text = ef.read().decode('utf-8')
                    manifest = json.loads(text)
                except Exception as exc:
                    raise ImageArchiveInvalidException(
                        "Failed to decode and deserialize manifest.json: %s" % to_native(exc),
                        exc
                    )
                finally:
                    # In Python 2.6, this does not have __exit__
                    ef.close()

                if len(manifest) == 0:
                    raise ImageArchiveInvalidException(
                        "Expected to have at least one entry in manifest.json but found none",
                        None
                    )

                result = []
                for index, meta in enumerate(manifest):
                    try:
                        config_file = meta['Config']
                    except KeyError as exc:
                        raise ImageArchiveInvalidException(
                            "Failed to get Config entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)),
                            exc
                        )

                    # Extracts hash without 'sha256:' prefix
                    try:
                        # Strip off .json filename extension, leaving just the hash.
                        image_id = os.path.splitext(config_file)[0]
                    except Exception as exc:
                        raise ImageArchiveInvalidException(
                            "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)),
                            exc
                        )

                    for prefix in (
                        'blobs/sha256/',  # Moby 25.0.0, Docker API 1.44
                    ):
                        if image_id.startswith(prefix):
                            image_id = image_id[len(prefix):]

                    try:
                        repo_tags = meta['RepoTags']
                    except KeyError as exc:
                        raise ImageArchiveInvalidException(
                            "Failed to get RepoTags entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)),
                            exc
                        )

                    result.append(ImageArchiveManifestSummary(
                        image_id=image_id,
                        repo_tags=repo_tags
                    ))
                return result

            except ImageArchiveInvalidException:
                raise
            except Exception as exc:
                raise ImageArchiveInvalidException(
                    "Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc)),
                    exc
                )

        finally:
            # In Python 2.6, TarFile does not have __exit__
            tf.close()

    except ImageArchiveInvalidException:
        raise
    except Exception as exc:
        raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc)), exc)


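# For reference, manifest.json in "docker image save" output looks roughly like
# this (illustrative content):
#
#     [
#         {
#             "Config": "f5a6...889b.json",
#             "RepoTags": ["hello-world:latest"],
#             "Layers": ["abc123.../layer.tar"]
#         }
#     ]
#
# Newer engines (Moby 25.0.0, Docker API 1.44) write OCI-style archives where
# Config looks like "blobs/sha256/f5a6...889b", hence the prefix stripping above.
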
def archived_image_manifest(archive_path):
    '''
    Attempts to get Image.Id and image name from metadata stored in the image
    archive tar file.

    The tar should contain a file "manifest.json" with an array with a single entry,
    and the entry should have a Config field with the image ID in its file name, as
    well as a RepoTags list, which typically has only one entry.

    :raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.

    :param archive_path: Tar file to read
    :type archive_path: str

    :return: None, if no file exists at archive_path, or the single manifest entry. Its image ID will not have a sha256: prefix.
    :rtype: ImageArchiveManifestSummary | None
    '''

    results = load_archived_image_manifest(archive_path)
    if results is None:
        return None
    if len(results) == 1:
        return results[0]
    raise ImageArchiveInvalidException(
        "Expected to have one entry in manifest.json but found %s" % len(results),
        None
    )
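

# Example usage (a sketch):
#
#     summary = archived_image_manifest('/tmp/hello-world.tar')
#     if summary is not None:
#         print(api_image_id(summary.image_id), summary.repo_tags)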
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,893 @@
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from time import sleep

from ansible.module_utils.common.text.converters import to_native, to_text

from ansible_collections.community.docker.plugins.module_utils.util import (
    DifferenceTracker,
    DockerBaseClass,
    compare_generic,
    is_image_name_id,
    sanitize_result,
)

from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag


class Container(DockerBaseClass):
    def __init__(self, container, engine_driver):
        super(Container, self).__init__()
        self.raw = container
        self.id = None
        self.image = None
        self.image_name = None
        self.container = container
        self.engine_driver = engine_driver
        if container:
            self.id = engine_driver.get_container_id(container)
            self.image = engine_driver.get_image_from_container(container)
            self.image_name = engine_driver.get_image_name_from_container(container)
        self.log(self.container, pretty_print=True)

    @property
    def exists(self):
        return True if self.container else False

    @property
    def removing(self):
        return self.engine_driver.is_container_removing(self.container) if self.container else False

    @property
    def running(self):
        return self.engine_driver.is_container_running(self.container) if self.container else False

    @property
    def paused(self):
        return self.engine_driver.is_container_paused(self.container) if self.container else False


class ContainerManager(DockerBaseClass):
    def __init__(self, module, engine_driver, client, active_options):
        self.module = module
        self.engine_driver = engine_driver
        self.client = client
        self.options = active_options
        self.all_options = self._collect_all_options(active_options)
        self.check_mode = self.module.check_mode
        self.param_cleanup = self.module.params['cleanup']
        self.param_container_default_behavior = self.module.params['container_default_behavior']
        self.param_default_host_ip = self.module.params['default_host_ip']
        self.param_debug = self.module.params['debug']
        self.param_force_kill = self.module.params['force_kill']
        self.param_image = self.module.params['image']
        self.param_image_comparison = self.module.params['image_comparison']
        self.param_image_label_mismatch = self.module.params['image_label_mismatch']
        self.param_image_name_mismatch = self.module.params['image_name_mismatch']
        self.param_keep_volumes = self.module.params['keep_volumes']
        self.param_kill_signal = self.module.params['kill_signal']
        self.param_name = self.module.params['name']
        self.param_networks_cli_compatible = self.module.params['networks_cli_compatible']
        self.param_output_logs = self.module.params['output_logs']
        self.param_paused = self.module.params['paused']
        self.param_pull = self.module.params['pull']
        if self.param_pull is True:
            self.param_pull = 'always'
        if self.param_pull is False:
            self.param_pull = 'missing'
        self.param_pull_check_mode_behavior = self.module.params['pull_check_mode_behavior']
        self.param_recreate = self.module.params['recreate']
        self.param_removal_wait_timeout = self.module.params['removal_wait_timeout']
        self.param_healthy_wait_timeout = self.module.params['healthy_wait_timeout']
        if self.param_healthy_wait_timeout <= 0:
            self.param_healthy_wait_timeout = None
        self.param_restart = self.module.params['restart']
        self.param_state = self.module.params['state']
        self._parse_comparisons()
        self._update_params()
        self.results = {'changed': False, 'actions': []}
        self.diff = {}
        self.diff_tracker = DifferenceTracker()
        self.facts = {}
        if self.param_default_host_ip:
            valid_ip = False
            if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip):
                valid_ip = True
            if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip):
                valid_ip = True
            if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip):
                self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip)
                valid_ip = True
            if not valid_ip:
                self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
                          'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip))

    def _collect_all_options(self, active_options):
        all_options = {}
        for options in active_options:
            for option in options.options:
                all_options[option.name] = option
        return all_options

    def _collect_all_module_params(self):
        all_module_options = set()
        for option, data in self.module.argument_spec.items():
            all_module_options.add(option)
            if 'aliases' in data:
                for alias in data['aliases']:
                    all_module_options.add(alias)
        return all_module_options

    def _parse_comparisons(self):
        # Keep track of all module params and all option aliases
        all_module_options = self._collect_all_module_params()
        comp_aliases = {}
        for option_name, option in self.all_options.items():
            if option.not_an_ansible_option:
                continue
            comp_aliases[option_name] = option_name
            for alias in option.ansible_aliases:
                comp_aliases[alias] = option_name
        # Process comparisons specified by user
        if self.module.params.get('comparisons'):
            # If '*' appears in comparisons, process it first
            if '*' in self.module.params['comparisons']:
                value = self.module.params['comparisons']['*']
                if value not in ('strict', 'ignore'):
                    self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
                for option in self.all_options.values():
                    if option.name == 'networks':
                        # `networks` is special: only update if
                        # some value is actually specified
                        if self.module.params['networks'] is None:
                            continue
                    option.comparison = value
            # Now process all other comparisons.
            comp_aliases_used = {}
            for key, value in self.module.params['comparisons'].items():
                if key == '*':
                    continue
                # Find main key
                key_main = comp_aliases.get(key)
                if key_main is None:
                    if key in all_module_options:
                        self.fail("The module option '%s' cannot be specified in the comparisons dict, "
                                  "since it does not correspond to container's state!" % key)
                    if key not in self.all_options or self.all_options[key].not_an_ansible_option:
                        self.fail("Unknown module option '%s' in comparisons dict!" % key)
                    key_main = key
                if key_main in comp_aliases_used:
                    self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
                comp_aliases_used[key_main] = key
                # Check value and update accordingly
                if value in ('strict', 'ignore'):
                    self.all_options[key_main].comparison = value
                elif value == 'allow_more_present':
                    if self.all_options[key_main].comparison_type == 'value':
                        self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
                    self.all_options[key_main].comparison = value
                else:
                    self.fail("Unknown comparison mode '%s'!" % value)
        # Copy values
        for option in self.all_options.values():
            if option.copy_comparison_from is not None:
                option.comparison = self.all_options[option.copy_comparison_from].comparison

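    # For illustration, a `comparisons` value as _parse_comparisons above might
    # receive it from a playbook ('*' sets the default mode, per-option entries
    # override it):
    #
    #     comparisons:
    #       '*': ignore
    #       image: strict
    #       env: allow_more_present
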
    def _update_params(self):
        if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None:
            # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
            # (assuming no explicit value is specified for network_mode)
            self.module.params['network_mode'] = self.module.params['networks'][0]['name']
        if self.param_container_default_behavior == 'compatibility':
            old_default_values = dict(
                auto_remove=False,
                detach=True,
                init=False,
                interactive=False,
                memory='0',
                paused=False,
                privileged=False,
                read_only=False,
                tty=False,
            )
            for param, value in old_default_values.items():
                if self.module.params[param] is None:
                    self.module.params[param] = value

    def fail(self, *args, **kwargs):
        self.client.fail(*args, **kwargs)

    def run(self):
        if self.param_state in ('stopped', 'started', 'present', 'healthy'):
            self.present(self.param_state)
        elif self.param_state == 'absent':
            self.absent()

        if not self.check_mode and not self.param_debug:
            self.results.pop('actions')

        if self.module._diff or self.param_debug:
            self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff

        if self.facts:
            self.results['container'] = self.facts

    def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None, health_state=False):
        delay = 1.0
        total_wait = 0
        while True:
            # Inspect container
            result = self.engine_driver.inspect_container_by_id(self.client, container_id)
            if result is None:
                if accept_removal:
                    return result
                msg = 'Encountered vanished container while waiting for container "{0}"'
                self.fail(msg.format(container_id))
            # Check container state
            state_info = result.get('State') or {}
            if health_state:
                state_info = state_info.get('Health') or {}
            state = state_info.get('Status')
            if complete_states is not None and state in complete_states:
                return result
            if wait_states is not None and state not in wait_states:
                msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
                self.fail(msg.format(container_id, state), container=result)
            # Wait
            if max_wait is not None:
                if total_wait > max_wait or delay < 1E-4:
                    msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
                    self.fail(msg.format(container_id, max_wait), container=result)
                if total_wait + delay > max_wait:
                    delay = max_wait - total_wait
            sleep(delay)
            total_wait += delay
            # Exponential backoff, but never wait longer than 10 seconds
            # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
            # until the maximal 10 seconds delay is reached. By then, the
            # code will have slept for ~1.5 minutes.)
            delay = min(delay * 1.1, 10)

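    # Illustration of the backoff in wait_for_state above: the delay sequence is
    # 1.0, 1.1, 1.21, ... capped at 10; since 1.1**24 < 10 < 1.1**25, the cap is
    # reached after 25 iterations, by which point roughly
    # sum(1.1 ** k for k in range(25)) ~= 98 seconds have been slept.
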
    def _collect_params(self, active_options):
        parameters = []
        for options in active_options:
            values = {}
            engine = options.get_engine(self.engine_driver.name)
            for option in options.all_options:
                if not option.not_an_ansible_option and self.module.params[option.name] is not None:
                    values[option.name] = self.module.params[option.name]
            values = options.preprocess(self.module, values)
            engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values)
            parameters.append((options, values))
        return parameters

    def _needs_container_image(self):
        for options, values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if engine.needs_container_image(values):
                return True
        return False

    def _needs_host_info(self):
        for options, values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if engine.needs_host_info(values):
                return True
        return False

    def present(self, state):
        self.parameters = self._collect_params(self.options)
        container = self._get_container(self.param_name)
        was_running = container.running
        was_paused = container.paused
        container_created = False

        # If the image parameter was passed then we need to deal with the image
        # version comparison. Otherwise we handle this depending on whether
        # the container already runs or not; in the former case, in case the
        # container needs to be restarted, we use the existing container's
        # image ID.
        image, container_image, comparison_image = self._get_image(
            container, needs_container_image=self._needs_container_image())
        self.log(image, pretty_print=True)
        host_info = self.engine_driver.get_host_info(self.client) if self._needs_host_info() else None
        if not container.exists or container.removing:
            # New container
            if container.removing:
                self.log('Found container in removal phase')
            else:
                self.log('No container found')
            if not self.param_image:
                self.fail('Cannot create container when image is not specified!')
            self.diff_tracker.add('exists', parameter=True, active=False)
            if container.removing and not self.check_mode:
                # Wait for container to be removed before trying to create it
                self.wait_for_state(
                    container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
            new_container = self.container_create(self.param_image)
            if new_container:
                container = new_container
            container_created = True
        else:
            # Existing container
            different, differences = self.has_different_configuration(container, container_image, comparison_image, host_info)
            image_different = False
            if self.all_options['image'].comparison == 'strict':
                image_different = self._image_is_different(image, container)
                if self.param_image_name_mismatch == 'recreate' and self.param_image is not None and self.param_image != container.image_name:
                    different = True
                    self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name)
            if image_different or different or self.param_recreate:
                self.diff_tracker.merge(differences)
                self.diff['differences'] = differences.get_legacy_docker_container_diffs()
                if image_different:
                    self.diff['image_different'] = True
                self.log("differences")
                self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
                image_to_use = self.param_image
                if not image_to_use and container and container.image:
                    image_to_use = container.image
                if not image_to_use:
                    self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
                if container.running:
                    self.container_stop(container.id)
                self.container_remove(container.id)
                if not self.check_mode:
                    self.wait_for_state(
                        container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
                new_container = self.container_create(image_to_use)
                if new_container:
                    container = new_container
                container_created = True
                comparison_image = image

        if container and container.exists:
            container = self.update_limits(container, container_image, comparison_image, host_info)
            container = self.update_networks(container, container_created)

            if state in ('started', 'healthy') and not container.running:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                container = self.container_start(container.id)
            elif state in ('started', 'healthy') and self.param_restart:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                self.diff_tracker.add('restarted', parameter=True, active=False)
                container = self.container_restart(container.id)
            elif state == 'stopped' and container.running:
                self.diff_tracker.add('running', parameter=False, active=was_running)
                self.container_stop(container.id)
                container = self._get_container(container.id)

            if state in ('started', 'healthy') and self.param_paused is not None and container.paused != self.param_paused:
                self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused)
                if not self.check_mode:
                    try:
                        if self.param_paused:
                            self.engine_driver.pause_container(self.client, container.id)
                        else:
                            self.engine_driver.unpause_container(self.client, container.id)
                    except Exception as exc:
                        self.fail("Error %s container %s: %s" % (
                            "pausing" if self.param_paused else "unpausing", container.id, to_native(exc)
                        ))
                    container = self._get_container(container.id)
                self.results['changed'] = True
                self.results['actions'].append(dict(set_paused=self.param_paused))

        self.facts = container.raw

        if state == 'healthy' and not self.check_mode:
            # `None` means that no health check is enabled; simply treat this as 'healthy'
            inspect_result = self.wait_for_state(
                container.id,
                wait_states=['starting', 'unhealthy'],
                complete_states=['healthy', None],
                max_wait=self.param_healthy_wait_timeout,
                health_state=True,
            )
            if inspect_result:
                # Return the latest inspection results retrieved
                self.facts = inspect_result

    def absent(self):
        container = self._get_container(self.param_name)
        if container.exists:
            if container.running:
                self.diff_tracker.add('running', parameter=False, active=True)
                self.container_stop(container.id)
            self.diff_tracker.add('exists', parameter=False, active=True)
            self.container_remove(container.id)

    def _output_logs(self, msg):
        self.module.log(msg=msg)

    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        '''
        container = self.engine_driver.inspect_container_by_name(self.client, container)
        return Container(container, self.engine_driver)

    def _get_container_image(self, container, fallback=None):
        if not container.exists or container.removing:
            return fallback
        image = container.image
        if is_image_name_id(image):
            image = self.engine_driver.inspect_image_by_id(self.client, image)
        else:
            repository, tag = parse_repository_tag(image)
            if not tag:
                tag = "latest"
            image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
        return image or fallback

    def _get_image(self, container, needs_container_image=False):
        image_parameter = self.param_image
        get_container_image = needs_container_image or not image_parameter
        container_image = self._get_container_image(container) if get_container_image else None
        if container_image:
            self.log("current image")
            self.log(container_image, pretty_print=True)
        if not image_parameter:
            self.log('No image specified')
            return None, container_image, container_image
        if is_image_name_id(image_parameter):
            image = self.engine_driver.inspect_image_by_id(self.client, image_parameter)
            if image is None:
                self.client.fail("Cannot find image with ID %s" % (image_parameter, ))
        else:
            repository, tag = parse_repository_tag(image_parameter)
            if not tag:
                tag = "latest"
            image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
            if not image and self.param_pull == "never":
                self.client.fail("Cannot find image with name %s:%s, and pull=never" % (repository, tag))
            if not image or self.param_pull == "always":
                if not self.check_mode:
                    self.log("Pull the image.")
                    image, alreadyToLatest = self.engine_driver.pull_image(
                        self.client, repository, tag, platform=self.module.params['platform'])
                    if alreadyToLatest:
                        self.results['changed'] = False
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=False))
                    else:
                        self.results['changed'] = True
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True))
                elif not image or self.param_pull_check_mode_behavior == 'always':
                    # If the image is not there, or pull_check_mode_behavior == 'always', claim we'll
                    # pull. (Implicitly: if the image is there, claim it already was latest unless
                    # pull_check_mode_behavior == 'always'.)
                    self.results['changed'] = True
                    action = dict(pulled_image="%s:%s" % (repository, tag))
                    if not image:
                        action['changed'] = True
                    self.results['actions'].append(action)

        self.log("image")
        self.log(image, pretty_print=True)

        comparison_image = image
        if self.param_image_comparison == 'current-image':
            if not get_container_image:
                container_image = self._get_container_image(container)
            comparison_image = container_image

        return image, container_image, comparison_image

    def _image_is_different(self, image, container):
        if image and image.get('Id'):
            if container and container.image:
                if image.get('Id') != container.image:
                    self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image)
                    return True
        return False

    def _compose_create_parameters(self, image):
        params = {}
        for options, values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if engine.can_set_value(self.engine_driver.get_api_version(self.client)):
                engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values)
        params['Image'] = image
        return params

    def _record_differences(self, differences, options, param_values, engine, container, container_image, image, host_info):
        container_values = engine.get_value(
            self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options, container_image, host_info)
        expected_values = engine.get_expected_values(
            self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy(), host_info)
        for option in options.options:
            if option.name in expected_values:
                param_value = expected_values[option.name]
                container_value = container_values.get(option.name)
                match = engine.compare_value(option, param_value, container_value)

                if not match:
                    # No match.
                    if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client),
                                                        option, image, container_value, param_value, host_info):
                        # Ignore the result
                        continue

                    # Record the differences
                    p = param_value
                    c = container_value
                    if option.comparison_type == 'set':
                        # Since the order does not matter, sort so that the diff output is better.
                        if p is not None:
                            p = sorted(p)
                        if c is not None:
                            c = sorted(c)
                    elif option.comparison_type == 'set(dict)':
                        # Since the order does not matter, sort so that the diff output is better.
                        if option.name == 'expected_mounts':
                            # For selected values, use one entry as key
                            def sort_key_fn(x):
                                return x['target']
                        else:
                            # We sort the list of dictionaries by using the sorted items of a dict as its key.
                            def sort_key_fn(x):
                                return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
                        if p is not None:
                            p = sorted(p, key=sort_key_fn)
                        if c is not None:
                            c = sorted(c, key=sort_key_fn)
                    differences.add(option.name, parameter=p, active=c)

    def has_different_configuration(self, container, container_image, image, host_info):
        differences = DifferenceTracker()
        update_differences = DifferenceTracker()
        for options, param_values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if engine.can_update_value(self.engine_driver.get_api_version(self.client)):
                self._record_differences(update_differences, options, param_values, engine, container, container_image, image, host_info)
            else:
                self._record_differences(differences, options, param_values, engine, container, container_image, image, host_info)
        has_differences = not differences.empty
        # Only consider differences of properties that can be updated when there are also other differences
        if has_differences:
            differences.merge(update_differences)
        return has_differences, differences

    def has_different_resource_limits(self, container, container_image, image, host_info):
        differences = DifferenceTracker()
        for options, param_values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
                continue
            self._record_differences(differences, options, param_values, engine, container, container_image, image, host_info)
        has_differences = not differences.empty
        return has_differences, differences

    def _compose_update_parameters(self):
        result = {}
        for options, values in self.parameters:
            engine = options.get_engine(self.engine_driver.name)
            if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
                continue
            engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values)
        return result

    def update_limits(self, container, container_image, image, host_info):
        limits_differ, different_limits = self.has_different_resource_limits(container, container_image, image, host_info)
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
            self.diff_tracker.merge(different_limits)
        if limits_differ and not self.check_mode:
            self.container_update(container.id, self._compose_update_parameters())
            return self._get_container(container.id)
        return container

    def has_network_differences(self, container):
        '''
        Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
        '''
        different = False
        differences = []

        if not self.module.params['networks']:
            return different, differences

        if not container.container.get('NetworkSettings'):
            self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")

        connected_networks = container.container['NetworkSettings']['Networks']
        for network in self.module.params['networks']:
            network_info = connected_networks.get(network['name'])
            if network_info is None:
                different = True
                differences.append(dict(
                    parameter=network,
                    container=None
                ))
            else:
                diff = False
                network_info_ipam = network_info.get('IPAMConfig') or {}
                if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
                    diff = True
                if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
                    diff = True
                if network.get('aliases'):
                    if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
                        diff = True
                if network.get('links'):
                    expected_links = []
                    for link, alias in network['links']:
                        expected_links.append("%s:%s" % (link, alias))
                    if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
                        diff = True
                if network.get('mac_address') and network['mac_address'] != network_info.get('MacAddress'):
                    diff = True
                if diff:
                    different = True
                    differences.append(dict(
                        parameter=network,
                        container=dict(
                            name=network['name'],
                            ipv4_address=network_info_ipam.get('IPv4Address'),
                            ipv6_address=network_info_ipam.get('IPv6Address'),
                            aliases=network_info.get('Aliases'),
                            links=network_info.get('Links'),
                            mac_address=network_info.get('MacAddress'),
                        )
                    ))
        return different, differences

    def has_extra_networks(self, container):
        '''
        Check if the container is connected to non-requested networks
        '''
        extra_networks = []
        extra = False

        if not container.container.get('NetworkSettings'):
            self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")

        connected_networks = container.container['NetworkSettings'].get('Networks')
        if connected_networks:
            for network, network_config in connected_networks.items():
                keep = False
                if self.module.params['networks']:
                    for expected_network in self.module.params['networks']:
                        if expected_network['name'] == network:
                            keep = True
                if not keep:
                    extra = True
                    extra_networks.append(dict(name=network, id=network_config['NetworkID']))
        return extra, extra_networks

    def update_networks(self, container, container_created):
        updated_container = container
        if self.all_options['networks'].comparison != 'ignore' or container_created:
            has_network_differences, network_differences = self.has_network_differences(container)
            if has_network_differences:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(network_differences=network_differences))
                else:
                    self.diff['differences'] = [dict(network_differences=network_differences)]
                for netdiff in network_differences:
                    self.diff_tracker.add(
                        'network.{0}'.format(netdiff['parameter']['name']),
                        parameter=netdiff['parameter'],
                        active=netdiff['container']
                    )
                self.results['changed'] = True
                updated_container = self._add_networks(container, network_differences)

        purge_networks = self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None
        if purge_networks:
            has_extra_networks, extra_networks = self.has_extra_networks(container)
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                for extra_network in extra_networks:
                    self.diff_tracker.add(
                        'network.{0}'.format(extra_network['name']),
                        active=extra_network
                    )
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container

    def _add_networks(self, container, differences):
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
                                                                                          to_native(exc)))
            # connect to the network
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter']))
            if not self.check_mode:
                params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')}
                try:
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
        return self._get_container(container.id)

    def _purge_networks(self, container, networks):
        for network in networks:
            self.results['actions'].append(dict(removed_from_network=network['name']))
            if not self.check_mode:
                try:
                    self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name'])
                except Exception as exc:
                    self.fail("Error disconnecting container from network %s - %s" % (network['name'],
                                                                                      to_native(exc)))
        return self._get_container(container.id)

    def container_create(self, image):
        create_parameters = self._compose_create_parameters(image)
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        networks = {}
        if self.param_networks_cli_compatible and self.module.params['networks']:
            network_list = self.module.params['networks']
            if not self.engine_driver.create_container_supports_more_than_one_network(self.client):
                network_list = network_list[:1]
            for network in network_list:
                networks[network['name']] = {
                    key: value for key, value in network.items()
                    if key not in ('name', 'id')
                }
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters, networks=networks))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters, networks=networks)
            except Exception as exc:
                self.fail("Error creating container: %s" % to_native(exc))
            return self._get_container(container_id)
        return new_container

    def container_start(self, container_id):
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.start_container(self.client, container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))

            if self.module.params['detach'] is False:
                status = self.engine_driver.wait_for_container(self.client, container_id)
                self.client.fail_results['status'] = status
                self.results['status'] = status

                if self.module.params['auto_remove']:
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.param_output_logs:
                        self.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    output, real_output = self.engine_driver.get_container_output(self.client, container_id)
                    if real_output and self.param_output_logs:
                        self._output_logs(msg=output)

                if self.param_cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                if status != 0:
                    # Set `failed` to True and return output as msg
                    self.results['failed'] = True
                    self.results['msg'] = output
                return insp
        return self._get_container(container_id)

    def container_remove(self, container_id, link=False, force=False):
        volume_state = (not self.param_keep_volumes)
        self.log("remove container container:%s v:%s link:%s force:%s" % (container_id, volume_state, link, force))
        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force)
            except Exception as exc:
                self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc)))

    def container_update(self, container_id, update_parameters):
        if update_parameters:
            self.log("update container %s" % (container_id))
            self.log(update_parameters, pretty_print=True)
            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
            self.results['changed'] = True
            if not self.check_mode:
                try:
                    self.engine_driver.update_container(self.client, container_id, update_parameters)
                except Exception as exc:
                    self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
        return self._get_container(container_id)

    def container_kill(self, container_id):
        self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal)
            except Exception as exc:
                self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))

    def container_restart(self, container_id):
        self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout']))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10)
            except Exception as exc:
                self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
        return self._get_container(container_id)

    def container_stop(self, container_id):
        if self.param_force_kill:
            self.container_kill(container_id)
            return
        self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout']))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout'])
            except Exception as exc:
                self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))


def run_module(engine_driver):
    module, active_options, client = engine_driver.setup(
        argument_spec=dict(
            cleanup=dict(type='bool', default=False),
            comparisons=dict(type='dict'),
            container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
            command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'),
            default_host_ip=dict(type='str'),
            force_kill=dict(type='bool', default=False, aliases=['forcekill']),
            image=dict(type='str'),
            image_comparison=dict(type='str', choices=['desired-image', 'current-image'], default='desired-image'),
            image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'),
            image_name_mismatch=dict(type='str', choices=['ignore', 'recreate'], default='recreate'),
            keep_volumes=dict(type='bool', default=True),
            kill_signal=dict(type='str'),
            name=dict(type='str', required=True),
            networks_cli_compatible=dict(type='bool', default=True),
            output_logs=dict(type='bool', default=False),
            paused=dict(type='bool'),
            pull=dict(type='raw', choices=['never', 'missing', 'always', True, False], default='missing'),
            pull_check_mode_behavior=dict(type='str', choices=['image_not_present', 'always'], default='image_not_present'),
            recreate=dict(type='bool', default=False),
            removal_wait_timeout=dict(type='float'),
            restart=dict(type='bool', default=False),
            state=dict(type='str', default='started', choices=['absent', 'present', 'healthy', 'started', 'stopped']),
            healthy_wait_timeout=dict(type='float', default=300),
        ),
        required_if=[
            ('state', 'present', ['image']),
        ],
    )

    def execute():
        cm = ContainerManager(module, engine_driver, client, active_options)
        cm.run()
        module.exit_json(**sanitize_result(cm.results))

    engine_driver.run(execute, client)
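
# Editor's note: an illustrative sketch (not part of the original file) of how a
# concrete module is expected to drive run_module(). The driver class name is an
# assumption; any object exposing the setup()/run() interface used above works:
#
#     def main():
#         engine_driver = DockerAPIEngineDriver()  # assumed engine driver implementation
#         run_module(engine_driver)
#
#     if __name__ == '__main__':
#         main()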

@@ -0,0 +1,21 @@

# -*- coding: utf-8 -*-

# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Provide selectors import."""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


# Once we drop support for ansible-core 2.16, we can remove this Python version check.

from sys import version_info as _python_version_info


if _python_version_info < (3, 4):
    from ansible.module_utils.compat import selectors  # noqa: F401, pylint: disable=unused-import
else:
    import selectors  # noqa: F401, pylint: disable=unused-import

@@ -0,0 +1,206 @@

# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import os
import os.path
import socket as pysocket
import struct

from ansible.module_utils.six import PY2

from ansible_collections.community.docker.plugins.module_utils._api.utils import socket as docker_socket

from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
    make_unblocking,
    shutdown_writing,
    write_to_socket,
)


PARAMIKO_POLL_TIMEOUT = 0.01  # 10 milliseconds


class DockerSocketHandlerBase(object):
    def __init__(self, sock, selectors, log=None):
        make_unblocking(sock)

        self._selectors = selectors
        if log is not None:
            self._log = log
        else:
            self._log = lambda msg: True
        self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))

        self._sock = sock
        self._block_done_callback = None
        self._block_buffer = []
        self._eof = False
        self._read_buffer = b''
        self._write_buffer = b''
        self._end_of_writing = False

        self._current_stream = None
        self._current_missing = 0
        self._current_buffer = b''

        self._selector = self._selectors.DefaultSelector()
        self._selector.register(self._sock, self._selectors.EVENT_READ)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self._selector.close()

    def set_block_done_callback(self, block_done_callback):
        self._block_done_callback = block_done_callback
        if self._block_done_callback is not None:
            while self._block_buffer:
                # hand buffered blocks to the callback, oldest first
                elt = self._block_buffer.pop(0)
                self._block_done_callback(*elt)

    def _add_block(self, stream_id, data):
        if self._block_done_callback is not None:
            self._block_done_callback(stream_id, data)
        else:
            self._block_buffer.append((stream_id, data))

    def _read(self):
        if self._eof:
            return
        if hasattr(self._sock, 'recv'):
            try:
                data = self._sock.recv(262144)
            except Exception as e:
                # After calling self._sock.shutdown(), OpenSSL's/urllib3's
                # WrappedSocket seems to eventually raise ZeroReturnError in
                # case of EOF
                if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
                    self._eof = True
                    return
                else:
                    raise
        elif not PY2 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
            data = self._sock.read()
        else:
            # os.read() needs an explicit maximum number of bytes to read
            data = os.read(self._sock.fileno(), 262144)
        if data is None:
            # no data available
            return
        self._log('read {0} bytes'.format(len(data)))
        if len(data) == 0:
            # Stream EOF
            self._eof = True
            return
        self._read_buffer += data
        while len(self._read_buffer) > 0:
            if self._current_missing > 0:
                n = min(len(self._read_buffer), self._current_missing)
                self._current_buffer += self._read_buffer[:n]
                self._read_buffer = self._read_buffer[n:]
                self._current_missing -= n
                if self._current_missing == 0:
                    self._add_block(self._current_stream, self._current_buffer)
                    self._current_buffer = b''
            if len(self._read_buffer) < 8:
                break
            self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
            self._read_buffer = self._read_buffer[8:]
            if self._current_missing < 0:
                # Stream EOF (as reported by docker daemon)
                self._eof = True
                break

    def _handle_end_of_writing(self):
        if self._end_of_writing and len(self._write_buffer) == 0:
            self._end_of_writing = False
            self._log('Shutting socket down for writing')
            shutdown_writing(self._sock, self._log)

    def _write(self):
        if len(self._write_buffer) > 0:
            written = write_to_socket(self._sock, self._write_buffer)
            self._write_buffer = self._write_buffer[written:]
            self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)))
            if len(self._write_buffer) > 0:
                self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE)
            else:
                self._selector.modify(self._sock, self._selectors.EVENT_READ)
        self._handle_end_of_writing()

    def select(self, timeout=None, _internal_recursion=False):
        if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
            # When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
            # Channel object supports select(), but only for reading
            # (https://github.com/paramiko/paramiko/issues/695).
            if self._sock.send_ready():
                self._write()
                return True
            while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
                result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
                if self._sock.send_ready():
                    self._read()
                    result += 1
                if result > 0:
                    return True
                if timeout is not None:
                    timeout -= PARAMIKO_POLL_TIMEOUT
        self._log('select... ({0})'.format(timeout))
        events = self._selector.select(timeout)
        for key, event in events:
            if key.fileobj == self._sock:
                self._log(
                    'select event read:{0} write:{1}'.format(
                        event & self._selectors.EVENT_READ != 0,
                        event & self._selectors.EVENT_WRITE != 0))
                if event & self._selectors.EVENT_READ != 0:
                    self._read()
                if event & self._selectors.EVENT_WRITE != 0:
                    self._write()
        result = len(events)
        if self._paramiko_read_workaround and len(self._write_buffer) > 0:
            if self._sock.send_ready():
                self._write()
                result += 1
        return result > 0

    def is_eof(self):
        return self._eof

    def end_of_writing(self):
        self._end_of_writing = True
        self._handle_end_of_writing()

    def consume(self):
        stdout = []
        stderr = []

        def append_block(stream_id, data):
            if stream_id == docker_socket.STDOUT:
                stdout.append(data)
            elif stream_id == docker_socket.STDERR:
                stderr.append(data)
            else:
                raise ValueError('{0} is not a valid stream ID'.format(stream_id))

        self.end_of_writing()

        self.set_block_done_callback(append_block)
        while not self._eof:
            self.select()
        return b''.join(stdout), b''.join(stderr)

    def write(self, data):
        self._write_buffer += data
        if len(self._write_buffer) == len(data):
            # the buffer was empty before, so a write can be attempted right away
            self._write()


class DockerSocketHandlerModule(DockerSocketHandlerBase):
    def __init__(self, sock, module, selectors):
        super(DockerSocketHandlerModule, self).__init__(sock, selectors, module.debug)
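
# Editor's note: an illustrative sketch (not part of the original file). The handler
# demultiplexes Docker's 8-byte-framed stdout/stderr stream from an attach/exec
# socket; `sock` here is an assumed, already-connected socket object:
#
#     with DockerSocketHandlerBase(sock, selectors) as handler:
#         handler.write(b'input for the process\n')
#         stdout, stderr = handler.consume()  # drains both streams until EOF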

@@ -0,0 +1,66 @@

# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import fcntl
import os
import os.path
import socket as pysocket

from ansible.module_utils.six import PY2


def make_file_unblocking(file):
    fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)


def make_file_blocking(file):
    fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) & ~os.O_NONBLOCK)


def make_unblocking(sock):
    if hasattr(sock, '_sock'):
        sock._sock.setblocking(0)
    elif hasattr(sock, 'setblocking'):
        sock.setblocking(0)
    else:
        make_file_unblocking(sock)


def _empty_writer(msg):
    pass


def shutdown_writing(sock, log=_empty_writer):
    # FIXME: This does **not work with SSLSocket**! Apparently SSLSocket does not allow to send
    # a close_notify TLS alert without completely shutting down the connection.
    # Calling sock.shutdown(pysocket.SHUT_WR) simply turns off TLS encryption and from that
    # point on the raw encrypted data is returned when sock.recv() is called. :-(
    if hasattr(sock, 'shutdown_write'):
        sock.shutdown_write()
    elif hasattr(sock, 'shutdown'):
        try:
            sock.shutdown(pysocket.SHUT_WR)
        except TypeError as e:
            # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
            log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
            sock.shutdown()
    elif not PY2 and isinstance(sock, getattr(pysocket, 'SocketIO')):
        sock._sock.shutdown(pysocket.SHUT_WR)
    else:
        log('No idea how to signal end of writing')


def write_to_socket(sock, data):
    if hasattr(sock, '_send_until_done'):
        # WrappedSocket (urllib3/contrib/pyopenssl) does not have `send`, but
        # only `sendall`, which uses `_send_until_done` under the hood.
        return sock._send_until_done(data)
    elif hasattr(sock, 'send'):
        return sock.send(data)
    else:
        return os.write(sock.fileno(), data)
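
# Editor's note: an illustrative sketch (not part of the original file). The helpers
# above duck-type over plain sockets, pyOpenSSL's WrappedSocket, and file-like
# objects; `sock` is an assumed socket-like object:
#
#     make_unblocking(sock)                     # switch to non-blocking mode
#     sent = write_to_socket(sock, b'payload')  # returns the number of bytes written
#     shutdown_writing(sock)                    # signal end-of-input to the peer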

@@ -0,0 +1,281 @@

# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# Copyright (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import json
from time import sleep

try:
    from docker.errors import APIError, NotFound
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible.module_utils.common.text.converters import to_native

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient


class AnsibleDockerSwarmClient(AnsibleDockerClient):

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
"""
|
||||
Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
|
||||
of Docker host the module is executed on
|
||||
:return:
|
||||
NodeID of host or 'None' if not part of Swarm
|
||||
"""
|
||||
|
||||
try:
|
||||
info = self.info()
|
||||
except APIError as exc:
|
||||
self.fail("Failed to get node information for %s" % to_native(exc))
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
swarm_info = json.loads(json_str)
|
||||
if swarm_info['Swarm']['NodeID']:
|
||||
return swarm_info['Swarm']['NodeID']
|
||||
return None
|
||||
|
||||
    def check_if_swarm_node(self, node_id=None):
        """
        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, it reads the Docker host
        system information and looks whether a specific key exists in the output. If 'node_id' is provided, it tries to
        read node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles the exception if
        it is not executed on a Swarm manager.

        :param node_id: Node identifier
        :return:
            bool: True if the node is part of a Swarm, False otherwise
        """

        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if the node role is set as Manager in the Swarm. The node is the Docker host on which the module action
        is performed. The inspect_swarm() call will fail if the node is not a manager.

        :return: True if the node is a Swarm Manager, False otherwise
        """

        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If the host is not a Swarm manager, then the Ansible task on this host should end with the 'failed' state.
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if the node role is set as Worker in the Swarm. The node is the Docker host on which the module action
        is performed. Via check_if_swarm_node(), this will fail if run on a host that is not part of a Swarm.

        :return: True if the node is a Swarm Worker, False otherwise
        """

        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Checks if the node status on the Swarm manager is 'down'. If node_id is provided, it queries the manager about
        the node specified in the parameter; otherwise, it queries the manager itself. If run on a Swarm Worker node or
        on a host that is not part of a Swarm, it will fail the playbook.

        :param repeat_check: number of check attempts with a 5 second delay between them; by default, check only once
        :param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
        :return:
            True if the node is part of the Swarm but its state is down, False otherwise
        """

        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info, as in the output of the 'docker node inspect' command, about a single node

        :param skip_missing: if True, then the function will return None instead of failing the task
        :param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info, as in the output of the 'docker node inspect' command, about all registered nodes

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns a list of nodes registered in the Swarm

        :param output: defines the format of the returned data
        :return:
            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered in the Swarm;
            if 'output' is 'long', the returned data is a list of dicts containing the attributes as in the
            output of the 'docker node ls' command
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            return None

        return nodes_list
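
    # Editor's note: illustrative sample output (hypothetical values, not part of
    # the original file). With output='short' this might return
    #     ['manager-host', 'worker-host']
    # and with output='long' entries such as
    #     {'ID': 'abc123', 'Hostname': 'manager-host', 'Status': 'ready',
    #      'Availability': 'active', 'Leader': True, 'ManagerStatus': 'reachable',
    #      'EngineVersion': '24.0.7'}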

    def get_node_name_by_id(self, nodeid):
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Returns Swarm service info, as in the output of the 'docker service inspect' command, about a single service

        :param service_id: service ID or name
        :param skip_missing: if True, then the function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info

@@ -0,0 +1,426 @@

# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


import json
import re
from datetime import timedelta

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.common.text.converters import to_text


DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'  # deprecated
DEFAULT_TIMEOUT_SECONDS = 60

DOCKER_COMMON_ARGS = dict(
    docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
    tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
    api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
    timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
    ca_path=dict(type='path', aliases=['ca_cert', 'tls_ca_cert', 'cacert_path']),
    client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
    client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
    tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
    use_ssh_client=dict(type='bool', default=False),
    validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
    debug=dict(type='bool', default=False)
)

DOCKER_COMMON_ARGS_VARS = dict([
    [option_name, 'ansible_docker_%s' % option_name]
    for option_name in DOCKER_COMMON_ARGS
    if option_name != 'debug'
])

DOCKER_MUTUALLY_EXCLUSIVE = []

DOCKER_REQUIRED_TOGETHER = [
    ['client_cert', 'client_key']
]

DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']


def is_image_name_id(name):
    """Check whether the given image name is in fact an image ID (hash)."""
    if re.match('^sha256:[0-9a-fA-F]{64}$', name):
        return True
    return False


def is_valid_tag(tag, allow_empty=False):
    """Check whether the given string is a valid docker tag name."""
    if not tag:
        return allow_empty
    # See here ("Extended description") for a definition what tags can be:
    # https://docs.docker.com/engine/reference/commandline/tag/
    return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
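
# Editor's note: illustrative evaluations (not part of the original file) of the
# tag grammar above:
#     is_valid_tag('v1.0-alpha_2')  # True
#     is_valid_tag('.hidden')       # False (tag must not start with '.' or '-')
#     is_valid_tag('a' * 129)       # False (at most 128 characters)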


def sanitize_result(data):
    """Sanitize data object for return to Ansible.

    When the data object contains types such as docker.types.containers.HostConfig,
    Ansible will fail when these are returned via exit_json or fail_json.
    HostConfig is derived from dict, but its constructor requires additional
    arguments. This function sanitizes data structures by recursively converting
    everything derived from dict to dict and everything derived from list (and tuple)
    to a list.
    """
    if isinstance(data, dict):
        return dict((k, sanitize_result(v)) for k, v in data.items())
    elif isinstance(data, (list, tuple)):
        return [sanitize_result(v) for v in data]
    else:
        return data
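
# Editor's note: an illustrative sketch (not part of the original file); `Wrapped`
# stands for any dict-derived type returned by the Docker SDK:
#     sanitize_result({'Config': Wrapped(...), 'Names': ('/web',)})
#     # -> {'Config': {...}, 'Names': ['/web']}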


def log_debug(msg, pretty_print=False):
    """Write a log message to docker.log.

    If ``pretty_print=True``, the message will be pretty-printed as JSON.
    """
    with open('docker.log', 'a') as log_file:
        if pretty_print:
            log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
            log_file.write(u'\n')
        else:
            log_file.write(msg + u'\n')


class DockerBaseClass(object):
    def __init__(self):
        self.debug = False

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     log_debug(msg, pretty_print=pretty_print)


def update_tls_hostname(result, old_behavior=False, deprecate_function=None, uses_tls=True):
    if result['tls_hostname'] is None:
        # get default machine name from the url
        parsed_url = urlparse(result['docker_host'])
        result['tls_hostname'] = parsed_url.netloc.rsplit(':', 1)[0]


def compare_dict_allow_more_present(av, bv):
    '''
    Compare two dictionaries for whether every entry of the first is in the second.
    '''
    for key, value in av.items():
        if key not in bv:
            return False
        if bv[key] != value:
            return False
    return True


def compare_generic(a, b, method, datatype):
    '''
    Compare values a and b as described by method and datatype.

    Returns ``True`` if the values compare equal, and ``False`` if not.

    ``a`` is usually the module's parameter, while ``b`` is a property
    of the current object. ``a`` must not be ``None`` (except for
    ``datatype == 'value'``).

    Valid values for ``method`` are:
    - ``ignore`` (always compare as equal);
    - ``strict`` (only compare if really equal)
    - ``allow_more_present`` (allow b to have elements which a does not have).

    Valid values for ``datatype`` are:
    - ``value``: for simple values (strings, numbers, ...);
    - ``list``: for ``list``s or ``tuple``s where order matters;
    - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
      matter;
    - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
      not matter and which contain ``dict``s; ``allow_more_present`` is used
      for the ``dict``s, and these are assumed to be dictionaries of values;
    - ``dict``: for dictionaries of values.
    '''
    if method == 'ignore':
        return True
    # If a or b is None:
    if a is None or b is None:
        # If both are None: equality
        if a == b:
            return True
        # Otherwise, not equal for values, and equal
        # if the other is empty for set/list/dict
        if datatype == 'value':
            return False
        # For allow_more_present, allow a to be None
        if method == 'allow_more_present' and a is None:
            return True
        # Otherwise, the iterable object which is not None must have length 0
        return len(b if a is None else a) == 0
    # Do proper comparison (both objects not None)
    if datatype == 'value':
        return a == b
    elif datatype == 'list':
        if method == 'strict':
            return a == b
        else:
            i = 0
            for v in a:
                while i < len(b) and b[i] != v:
                    i += 1
                if i == len(b):
                    return False
                i += 1
            return True
    elif datatype == 'dict':
        if method == 'strict':
            return a == b
        else:
            return compare_dict_allow_more_present(a, b)
    elif datatype == 'set':
        set_a = set(a)
        set_b = set(b)
        if method == 'strict':
            return set_a == set_b
        else:
            return set_b >= set_a
    elif datatype == 'set(dict)':
        for av in a:
            found = False
            for bv in b:
                if compare_dict_allow_more_present(av, bv):
                    found = True
                    break
            if not found:
                return False
        if method == 'strict':
            # If we would know that both a and b do not contain duplicates,
            # we could simply compare len(a) to len(b) to finish this test.
            # We can assume that b has no duplicates (as it is returned by
            # docker), but we do not know for a.
            for bv in b:
                found = False
                for av in a:
                    if compare_dict_allow_more_present(av, bv):
                        found = True
                        break
                if not found:
                    return False
        return True


class DifferenceTracker(object):
    def __init__(self):
        self._diff = []

    def add(self, name, parameter=None, active=None):
        self._diff.append(dict(
            name=name,
            parameter=parameter,
            active=active,
        ))

    def merge(self, other_tracker):
        self._diff.extend(other_tracker._diff)

    @property
    def empty(self):
        return len(self._diff) == 0

    def get_before_after(self):
        '''
        Return texts ``before`` and ``after``.
        '''
        before = dict()
        after = dict()
        for item in self._diff:
            before[item['name']] = item['active']
            after[item['name']] = item['parameter']
        return before, after

    def has_difference_for(self, name):
        '''
        Returns a boolean if a difference exists for name
        '''
        return any(diff for diff in self._diff if diff['name'] == name)

    def get_legacy_docker_container_diffs(self):
        '''
        Return differences in the docker_container legacy format.
        '''
        result = []
        for entry in self._diff:
            item = dict()
            item[entry['name']] = dict(
                parameter=entry['parameter'],
                container=entry['active'],
            )
            result.append(item)
        return result

    def get_legacy_docker_diffs(self):
        '''
        Return differences in the docker_container legacy format.
        '''
        result = [entry['name'] for entry in self._diff]
        return result


def sanitize_labels(labels, labels_field, client=None, module=None):
    def fail(msg):
        if client is not None:
            client.fail(msg)
        if module is not None:
            module.fail_json(msg=msg)
        raise ValueError(msg)

    if labels is None:
        return
    for k, v in list(labels.items()):
        if not isinstance(k, string_types):
            fail(
                "The key {key!r} of {field} is not a string!".format(
                    field=labels_field, key=k))
        if isinstance(v, (bool, float)):
            fail(
                "The value {value!r} for {key!r} of {field} is not a string or something that can be safely converted to a string!".format(
                    field=labels_field, key=k, value=v))
        labels[k] = to_text(v)


def clean_dict_booleans_for_docker_api(data, allow_sequences=False):
    '''
    Go does not like Python booleans 'True' or 'False', while Ansible is just
    fine with them in YAML. As such, they need to be converted in cases where
    we pass dictionaries to the Docker API (e.g. docker_network's
    driver_options and docker_prune's filters). When `allow_sequences=True`
    YAML sequences (lists, tuples) are converted to [str] instead of str([...])
    which is the expected format of filters which accept lists such as labels.
    '''
    def sanitize(value):
        if value is True:
            return 'true'
        elif value is False:
            return 'false'
        else:
            return str(value)

    result = dict()
    if data is not None:
        for k, v in data.items():
            result[str(k)] = [sanitize(e) for e in v] if allow_sequences and is_sequence(v) else sanitize(v)
    return result
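
# Editor's note: illustrative evaluations (not part of the original file):
#     clean_dict_booleans_for_docker_api({'enable_ipv6': True, 'mtu': 1400})
#     # -> {'enable_ipv6': 'true', 'mtu': '1400'}
#     clean_dict_booleans_for_docker_api({'label': ['a=1', True]}, allow_sequences=True)
#     # -> {'label': ['a=1', 'true']}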


def convert_duration_to_nanosecond(time_str):
    """
    Return time duration in nanosecond.
    """
    if not isinstance(time_str, str):
        raise ValueError('Missing unit in duration - %s' % time_str)

    regex = re.compile(
        r'^(((?P<hours>\d+)h)?'
        r'((?P<minutes>\d+)m(?!s))?'
        r'((?P<seconds>\d+)s)?'
        r'((?P<milliseconds>\d+)ms)?'
        r'((?P<microseconds>\d+)us)?)$'
    )
    parts = regex.match(time_str)

    if not parts:
        raise ValueError('Invalid time duration - %s' % time_str)

    parts = parts.groupdict()
    time_params = {}
    for (name, value) in parts.items():
        if value:
            time_params[name] = int(value)

    delta = timedelta(**time_params)
    time_in_nanoseconds = (
        delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
    ) * 10 ** 3

    return time_in_nanoseconds
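
# Editor's note: illustrative evaluations (not part of the original file):
#     convert_duration_to_nanosecond('1m30s')  # -> 90000000000
#     convert_duration_to_nanosecond('500ms')  # -> 500000000
#     convert_duration_to_nanosecond('90')     # ValueError: Invalid time duration - 90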


def normalize_healthcheck_test(test):
    if isinstance(test, (tuple, list)):
        return [str(e) for e in test]
    return ['CMD-SHELL', str(test)]


def normalize_healthcheck(healthcheck, normalize_test=False):
    """
    Return dictionary of healthcheck parameters.
    """
    result = dict()

    # All supported healthcheck parameters
    options = ('test', 'test_cli_compatible', 'interval', 'timeout', 'start_period', 'start_interval', 'retries')

    duration_options = ('interval', 'timeout', 'start_period', 'start_interval')

    for key in options:
        if key in healthcheck:
            value = healthcheck[key]
            if value is None:
                # due to recursive argument_spec, all keys are always present
                # (but have default value None if not specified)
                continue
            if key in duration_options:
                value = convert_duration_to_nanosecond(value)
            if not value and not (healthcheck.get('test_cli_compatible') and key == 'test'):
                continue
            if key == 'retries':
                try:
                    value = int(value)
                except ValueError:
                    raise ValueError(
                        'Cannot parse number of retries for healthcheck. '
                        'Expected an integer, got "{0}".'.format(value)
                    )
            if key == 'test' and value and normalize_test:
                value = normalize_healthcheck_test(value)
            result[key] = value

    return result
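
# Editor's note: an illustrative evaluation (not part of the original file):
#     normalize_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s',
#                            'timeout': None, 'retries': '3'}, normalize_test=True)
#     # -> {'test': ['CMD-SHELL', 'curl -f http://localhost/'],
#     #     'interval': 30000000000, 'retries': 3}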


def parse_healthcheck(healthcheck):
    """
    Return dictionary of healthcheck parameters and boolean if
    healthcheck defined in image was requested to be disabled.
    """
    if (not healthcheck) or (not healthcheck.get('test')):
        return None, None

    result = normalize_healthcheck(healthcheck, normalize_test=True)

    if result['test'] == ['NONE']:
        # If the user explicitly disables the healthcheck, return None
        # as the healthcheck object, and set disable_healthcheck to True
        return None, True

    return result, False


def omit_none_from_dict(d):
    """
    Return a copy of the dictionary with all keys with value None omitted.
    """
    return dict((k, v) for (k, v) in d.items() if v is not None)
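
# Editor's note: an illustrative evaluation (not part of the original file):
#     omit_none_from_dict({'image': 'busybox', 'command': None})
#     # -> {'image': 'busybox'}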

@@ -0,0 +1,13 @@

# -*- coding: utf-8 -*-

# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Provide version object to compare version numbers."""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


from ansible.module_utils.compat.version import LooseVersion, StrictVersion  # noqa: F401, pylint: disable=unused-import