add docker examples

This commit is contained in:
Dany LE
2025-04-11 10:11:27 +00:00
parent 453233a25b
commit f135dcc118
704 changed files with 101445 additions and 21 deletions

View File

@ -0,0 +1,146 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Matt Clay <mclay@redhat.com>
# Copyright (c) 2020 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: current_container_facts
short_description: Return facts about whether the module runs in a container
version_added: 1.1.0
description:
- Return facts about whether the module runs in a Docker or podman container.
- This module attempts a best-effort detection. There might be special cases where it does not work; if you encounter one,
L(please file an issue, https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md).
author:
- Felix Fontein (@felixfontein)
extends_documentation_fragment:
- community.docker.attributes
- community.docker.attributes.facts
- community.docker.attributes.facts_module
- community.docker.attributes.idempotent_not_modify_state
"""
EXAMPLES = r"""
- name: Get facts on current container
community.docker.current_container_facts:
- name: Print information on current container when running in a container
ansible.builtin.debug:
msg: "Container ID is {{ ansible_module_container_id }}"
when: ansible_module_running_in_container
"""
RETURN = r"""
ansible_facts:
description: Ansible facts returned by the module.
type: dict
returned: always
contains:
ansible_module_running_in_container:
description:
- Whether the module was able to detect that it runs in a container or not.
returned: always
type: bool
ansible_module_container_id:
description:
- The detected container ID.
- Contains an empty string if no container was detected.
returned: always
type: str
ansible_module_container_type:
description:
- The detected container environment.
- Contains an empty string if no container was detected, or a non-empty string identifying the container environment.
- V(docker) indicates that the module ran inside a regular Docker container.
- V(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported.
- V(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since
community.docker 2.4.0.
- V(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker
3.3.0.
returned: always
type: str
choices:
- ''
- docker
- azure_pipelines
- github_actions
- podman
"""
import os
import re
from ansible.module_utils.basic import AnsibleModule
def main():
    """Detect whether this module runs inside a container and report facts about it."""
    module = AnsibleModule(dict(), supports_check_mode=True)

    cpuset_path = '/proc/self/cpuset'
    mountinfo_path = '/proc/self/mountinfo'

    container_id = ''
    container_type = ''

    if os.path.exists(cpuset_path):
        # File content varies based on the environment:
        #   No Container: /
        #   Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
        #   Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
        #   Podman: /../../../../../..
        # While this was true and worked well for a long time, this seems to be no longer accurate
        # with newer Docker / Podman versions and/or with cgroupv2. That's why the /proc/self/mountinfo
        # detection further down is done when this test is inconclusive.
        with open(cpuset_path, 'rb') as cpuset_file:
            cpuset = cpuset_file.read().decode('utf-8')
        cgroup_path, cgroup_name = os.path.split(cpuset.strip())
        # Map the known cgroup parent directories to their container environment.
        known_environments = {
            '/docker': 'docker',
            '/azpl_job': 'azure_pipelines',
            '/actions_job': 'github_actions',
        }
        if cgroup_path in known_environments:
            container_id = cgroup_name
            container_type = known_environments[cgroup_path]

    if not container_id and os.path.exists(mountinfo_path):
        with open(mountinfo_path, 'rb') as mountinfo_file:
            mountinfo = mountinfo_file.read().decode('utf-8')
        # As to why this works, see the explanations by Matt Clay in
        # https://github.com/ansible/ansible/blob/80d2f8da02052f64396da6b8caaf820eedbf18e2/test/lib/ansible_test/_internal/docker_util.py#L571-L610
        for line in mountinfo.splitlines():
            fields = line.split()
            if len(fields) < 5 or fields[4] != '/etc/hostname':
                continue
            docker_match = re.match('.*/([a-f0-9]{64})/hostname$', fields[3])
            if docker_match:
                container_id = docker_match.group(1)
                container_type = 'docker'
            podman_match = re.match('.*/([a-f0-9]{64})/userdata/hostname$', fields[3])
            if podman_match:
                container_id = podman_match.group(1)
                container_type = 'podman'

    module.exit_json(ansible_facts=dict(
        ansible_module_running_in_container=container_id != '',
        ansible_module_container_id=container_id,
        ansible_module_container_type=container_type,
    ))


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,699 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# Copyright (c) 2023, Léo El Amri (@lel-amri)
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_compose_v2
short_description: Manage multi-container Docker applications with Docker Compose CLI plugin
version_added: 3.6.0
description:
- Uses Docker Compose to start or shutdown services.
extends_documentation_fragment:
- community.docker.compose_v2
- community.docker.compose_v2.minimum_version
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
details:
- In check mode, pulling the image does not result in a changed result.
diff_mode:
support: none
idempotent:
support: partial
details:
- If O(state=restarted) or O(recreate=always) the module is not idempotent.
options:
state:
description:
- Desired state of the project.
- V(present) is equivalent to running C(docker compose up).
- V(stopped) is equivalent to running C(docker compose stop).
- V(absent) is equivalent to running C(docker compose down).
- V(restarted) is equivalent to running C(docker compose restart).
type: str
default: present
choices:
- absent
- stopped
- restarted
- present
pull:
description:
- Whether to pull images before running. This is used when C(docker compose up) is run.
- V(always) ensures that the images are always pulled, even when already present on the Docker daemon.
- V(missing) only pulls them when they are not present on the Docker daemon.
- V(never) never pulls images. If they are not present, the module will fail when trying to create the containers that
need them.
- V(policy) use the Compose file's C(pull_policy) defined for the service to figure out what to do.
type: str
choices:
- always
- missing
- never
- policy
default: policy
build:
description:
- Whether to build images before starting containers. This is used when C(docker compose up) is run.
- V(always) always builds before starting containers. This is equivalent to the C(--build) option of C(docker compose
up).
- V(never) never builds before starting containers. This is equivalent to the C(--no-build) option of C(docker compose
up).
- V(policy) uses the policy as defined in the Compose file.
type: str
choices:
- always
- never
- policy
default: policy
dependencies:
description:
- When O(state) is V(present) or V(restarted), specify whether or not to include linked services.
type: bool
default: true
ignore_build_events:
description:
- Ignores image building events for change detection.
- If O(state=present) and O(ignore_build_events=true) and O(build=always), a rebuild that does not trigger a container
restart no longer results in RV(ignore:changed=true).
- Note that Docker Compose 2.31.0 is the first Compose 2.x version to emit build events. For older versions, the behavior
is always as if O(ignore_build_events=true).
type: bool
default: true
version_added: 4.2.0
recreate:
description:
- By default containers will be recreated when their configuration differs from the service definition.
- Setting to V(never) ignores configuration differences and leaves existing containers unchanged.
- Setting to V(always) forces recreation of all existing containers.
type: str
default: auto
choices:
- always
- never
- auto
renew_anon_volumes:
description:
- Whether to recreate instead of reuse anonymous volumes from previous containers.
- V(true) is equivalent to the C(--renew-anon-volumes) option of C(docker compose up).
type: bool
default: false
version_added: 4.0.0
remove_images:
description:
- Use with O(state=absent) to remove all images or only local images.
type: str
choices:
- all
- local
remove_volumes:
description:
- Use with O(state=absent) to remove data volumes.
type: bool
default: false
remove_orphans:
description:
- Remove containers for services not defined in the Compose file.
type: bool
default: false
timeout:
description:
- Timeout in seconds for container shutdown when attached or when containers are already running.
type: int
services:
description:
- Specifies a subset of services to be targeted.
type: list
elements: str
scale:
description:
- Define how to scale services when running C(docker compose up).
- Provide a dictionary of key/value pairs where the key is the name of the service and the value is an integer count
for the number of containers.
type: dict
version_added: 3.7.0
wait:
description:
- When running C(docker compose up), pass C(--wait) to wait for services to be running/healthy.
- A timeout can be set with the O(wait_timeout) option.
type: bool
default: false
version_added: 3.8.0
wait_timeout:
description:
- When O(wait=true), wait at most this amount of seconds.
type: int
version_added: 3.8.0
assume_yes:
description:
- When O(assume_yes=true), pass C(-y)/C(--yes) to assume "yes" as answer to all prompts and run non-interactively.
- Right now a prompt is asked whenever a non-matching volume should be re-created. O(assume_yes=false)
results in the question being answered by "no", which will simply re-use the existing volume.
- This option is only available on Docker Compose 2.32.0 or newer.
type: bool
default: false
version_added: 4.5.0
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_compose_v2_pull
"""
EXAMPLES = r"""
# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
# flask directory
- name: Run using a project directory
hosts: localhost
gather_facts: false
tasks:
- name: Tear down existing services
community.docker.docker_compose_v2:
project_src: flask
state: absent
- name: Create and start services
community.docker.docker_compose_v2:
project_src: flask
register: output
- name: Show results
ansible.builtin.debug:
var: output
- name: Run `docker compose up` again
community.docker.docker_compose_v2:
project_src: flask
register: output
- name: Show results
ansible.builtin.debug:
var: output
- ansible.builtin.assert:
that: not output.changed
- name: Stop all services
community.docker.docker_compose_v2:
project_src: flask
state: stopped
register: output
- name: Show results
ansible.builtin.debug:
var: output
- name: Verify that web and db services are not running
ansible.builtin.assert:
that:
- web_container.State != 'running'
- db_container.State != 'running'
vars:
web_container: >-
{{ output.containers | selectattr("Service", "equalto", "web") | first }}
db_container: >-
{{ output.containers | selectattr("Service", "equalto", "db") | first }}
- name: Restart services
community.docker.docker_compose_v2:
project_src: flask
state: restarted
register: output
- name: Show results
ansible.builtin.debug:
var: output
- name: Verify that web and db services are running
ansible.builtin.assert:
that:
- web_container.State == 'running'
- db_container.State == 'running'
vars:
web_container: >-
{{ output.containers | selectattr("Service", "equalto", "web") | first }}
db_container: >-
{{ output.containers | selectattr("Service", "equalto", "db") | first }}
"""
RETURN = r"""
containers:
description:
- A list of containers associated to the service.
returned: success
type: list
elements: dict
contains:
Command:
description:
- The container's command.
type: raw
CreatedAt:
description:
- The timestamp when the container was created.
type: str
sample: "2024-01-02 12:20:41 +0100 CET"
ExitCode:
description:
- The container's exit code.
type: int
Health:
description:
- The container's health check.
type: raw
ID:
description:
- The container's ID.
type: str
sample: "44a7d607219a60b7db0a4817fb3205dce46e91df2cb4b78a6100b6e27b0d3135"
Image:
description:
- The container's image.
type: str
Labels:
description:
- Labels for this container.
type: dict
LocalVolumes:
description:
- The local volumes count.
type: str
Mounts:
description:
- Mounts.
type: str
Name:
description:
- The container's primary name.
type: str
Names:
description:
- List of names of the container.
type: list
elements: str
Networks:
description:
- List of networks attached to this container.
type: list
elements: str
Ports:
description:
- List of port assignments as a string.
type: str
Publishers:
description:
- List of port assignments.
type: list
elements: dict
contains:
URL:
description:
- Interface the port is bound to.
type: str
TargetPort:
description:
- The container's port the published port maps to.
type: int
PublishedPort:
description:
- The port that is published.
type: int
Protocol:
description:
- The protocol.
type: str
choices:
- tcp
- udp
RunningFor:
description:
- Amount of time the container runs.
type: str
Service:
description:
- The name of the service.
type: str
Size:
description:
- The container's size.
type: str
sample: "0B"
State:
description:
- The container's state.
type: str
sample: running
Status:
description:
- The container's status.
type: str
sample: Up About a minute
images:
description:
- A list of images associated to the service.
returned: success
type: list
elements: dict
contains:
ID:
description:
- The image's ID.
type: str
sample: sha256:c8bccc0af9571ec0d006a43acb5a8d08c4ce42b6cc7194dd6eb167976f501ef1
ContainerName:
description:
- Name of the container this image is used by.
type: str
Repository:
description:
- The repository where this image belongs to.
type: str
Tag:
description:
- The tag of the image.
type: str
Size:
description:
- The image's size in bytes.
type: int
actions:
description:
- A list of actions that have been applied.
returned: success
type: list
elements: dict
contains:
what:
description:
- What kind of resource was changed.
type: str
sample: container
choices:
- container
- image
- network
- service
- unknown
- volume
id:
description:
- The ID of the resource that was changed.
type: str
sample: container
status:
description:
- The status change that happened.
type: str
sample: Creating
choices:
- Starting
- Exiting
- Restarting
- Creating
- Stopping
- Killing
- Removing
- Recreating
- Pulling
- Building
"""
import traceback
from ansible.module_utils.common.validation import check_type_int
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
from ansible_collections.community.docker.plugins.module_utils.compose_v2 import (
BaseComposeManager,
common_compose_argspec_ex,
is_failed,
)
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
class ServicesManager(BaseComposeManager):
    # Drives `docker compose up` / `stop` / `restart` / `down` for the
    # docker_compose_v2 module, translating module parameters into CLI
    # arguments and parsed compose events into an Ansible result dict.

    def __init__(self, client):
        """Read all module parameters from *client* and validate them.

        Fails the module early when ``assume_yes`` is requested on a Compose
        version that does not support it, or when ``scale`` contains
        non-string keys or non-integer/negative values.
        """
        super(ServicesManager, self).__init__(client)
        parameters = self.client.module.params
        self.state = parameters['state']
        self.dependencies = parameters['dependencies']
        self.pull = parameters['pull']
        self.build = parameters['build']
        self.ignore_build_events = parameters['ignore_build_events']
        self.recreate = parameters['recreate']
        self.remove_images = parameters['remove_images']
        self.remove_volumes = parameters['remove_volumes']
        self.remove_orphans = parameters['remove_orphans']
        self.renew_anon_volumes = parameters['renew_anon_volumes']
        self.timeout = parameters['timeout']
        self.services = parameters['services'] or []
        self.scale = parameters['scale'] or {}
        self.wait = parameters['wait']
        self.wait_timeout = parameters['wait_timeout']
        self.yes = parameters['assume_yes']
        # The -y/--yes flag only exists since Docker Compose 2.32.0.
        if self.compose_version < LooseVersion('2.32.0') and self.yes:
            self.fail('assume_yes=true needs Docker Compose 2.32.0 or newer, not version %s' % (self.compose_version, ))
        # Validate scale: keys must be service names (strings), values must be
        # non-negative integers; normalized values are written back in place.
        for key, value in self.scale.items():
            if not isinstance(key, string_types):
                self.fail('The key %s for `scale` is not a string' % repr(key))
            try:
                value = check_type_int(value)
            except TypeError as exc:
                self.fail('The value %s for `scale[%s]` is not an integer' % (repr(value), repr(key)))
            if value < 0:
                self.fail('The value %s for `scale[%s]` is negative' % (repr(value), repr(key)))
            self.scale[key] = value

    def run(self):
        """Dispatch to the command matching the requested state and build the result."""
        if self.state == 'present':
            result = self.cmd_up()
        elif self.state == 'stopped':
            result = self.cmd_stop()
        elif self.state == 'restarted':
            result = self.cmd_restart()
        elif self.state == 'absent':
            result = self.cmd_down()
        # Always report the post-operation container and image state.
        result['containers'] = self.list_containers()
        result['images'] = self.list_images()
        self.cleanup_result(result)
        return result

    def get_up_cmd(self, dry_run, no_start=False):
        """Assemble the `docker compose up` argument list from the module options."""
        args = self.get_base_args() + ['up', '--detach', '--no-color', '--quiet-pull']
        if self.pull != 'policy':
            args.extend(['--pull', self.pull])
        if self.remove_orphans:
            args.append('--remove-orphans')
        if self.recreate == 'always':
            args.append('--force-recreate')
        if self.recreate == 'never':
            args.append('--no-recreate')
        if self.renew_anon_volumes:
            args.append('--renew-anon-volumes')
        if not self.dependencies:
            args.append('--no-deps')
        if self.timeout is not None:
            args.extend(['--timeout', '%d' % self.timeout])
        if self.build == 'always':
            args.append('--build')
        elif self.build == 'never':
            args.append('--no-build')
        for key, value in sorted(self.scale.items()):
            args.extend(['--scale', '%s=%d' % (key, value)])
        if self.wait:
            args.append('--wait')
        if self.wait_timeout is not None:
            args.extend(['--wait-timeout', str(self.wait_timeout)])
        if no_start:
            args.append('--no-start')
        if dry_run:
            args.append('--dry-run')
        if self.yes:
            # Note that for Docker Compose 2.32.x and 2.33.x, the long form is '--y' and not '--yes'.
            # This was fixed in Docker Compose 2.34.0 (https://github.com/docker/compose/releases/tag/v2.34.0).
            args.append('-y' if self.compose_version < LooseVersion('2.34.0') else '--yes')
        # '--' separates options from the (optional) list of target services.
        args.append('--')
        for service in self.services:
            args.append(service)
        return args

    def cmd_up(self):
        """Run `docker compose up` and derive changed/failed from its event stream."""
        result = dict()
        args = self.get_up_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        # Compose writes its progress events to stderr; parse them for change detection.
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr, ignore_service_pull_events=True, ignore_build_events=self.ignore_build_events)
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result

    def get_stop_cmd(self, dry_run):
        """Assemble the `docker compose stop` argument list."""
        args = self.get_base_args() + ['stop']
        if self.timeout is not None:
            args.extend(['--timeout', '%d' % self.timeout])
        if dry_run:
            args.append('--dry-run')
        args.append('--')
        for service in self.services:
            args.append(service)
        return args

    def _are_containers_stopped(self):
        """Return True if no container of the project is in a running-like state."""
        for container in self.list_containers_raw():
            if container['State'] not in ('created', 'exited', 'stopped', 'killed'):
                return False
        return True

    def cmd_stop(self):
        # Since 'docker compose stop' **always** claims it is stopping containers, even if they are already
        # stopped, we have to do this a bit more complicated.
        result = dict()
        # Make sure all containers are created
        args_1 = self.get_up_cmd(self.check_mode, no_start=True)
        rc_1, stdout_1, stderr_1 = self.client.call_cli(*args_1, cwd=self.project_src)
        events_1 = self.parse_events(stderr_1, dry_run=self.check_mode, nonzero_rc=rc_1 != 0)
        self.emit_warnings(events_1)
        self.update_result(result, events_1, stdout_1, stderr_1, ignore_service_pull_events=True, ignore_build_events=self.ignore_build_events)
        is_failed_1 = is_failed(events_1, rc_1)
        if not is_failed_1 and not self._are_containers_stopped():
            # Make sure all containers are stopped
            args_2 = self.get_stop_cmd(self.check_mode)
            rc_2, stdout_2, stderr_2 = self.client.call_cli(*args_2, cwd=self.project_src)
            events_2 = self.parse_events(stderr_2, dry_run=self.check_mode, nonzero_rc=rc_2 != 0)
            self.emit_warnings(events_2)
            self.update_result(result, events_2, stdout_2, stderr_2)
        else:
            # Nothing to stop (or the up step already failed) - use neutral placeholders.
            args_2 = []
            rc_2, stdout_2, stderr_2 = 0, b'', b''
            events_2 = []
        # Compose result
        self.update_failed(
            result,
            events_1 + events_2,
            args_1 if is_failed_1 else args_2,
            stdout_1 if is_failed_1 else stdout_2,
            stderr_1 if is_failed_1 else stderr_2,
            rc_1 if is_failed_1 else rc_2,
        )
        return result

    def get_restart_cmd(self, dry_run):
        """Assemble the `docker compose restart` argument list."""
        args = self.get_base_args() + ['restart']
        if not self.dependencies:
            args.append('--no-deps')
        if self.timeout is not None:
            args.extend(['--timeout', '%d' % self.timeout])
        if dry_run:
            args.append('--dry-run')
        args.append('--')
        for service in self.services:
            args.append(service)
        return args

    def cmd_restart(self):
        """Run `docker compose restart` and collect the result."""
        result = dict()
        args = self.get_restart_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr)
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result

    def get_down_cmd(self, dry_run):
        """Assemble the `docker compose down` argument list."""
        args = self.get_base_args() + ['down']
        if self.remove_orphans:
            args.append('--remove-orphans')
        if self.remove_images:
            args.extend(['--rmi', self.remove_images])
        if self.remove_volumes:
            args.append('--volumes')
        if self.timeout is not None:
            args.extend(['--timeout', '%d' % self.timeout])
        if dry_run:
            args.append('--dry-run')
        args.append('--')
        for service in self.services:
            args.append(service)
        return args

    def cmd_down(self):
        """Run `docker compose down` and collect the result."""
        result = dict()
        args = self.get_down_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr)
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result
def main():
    """Module entry point: build the argument spec, run the manager, exit JSON."""
    argument_spec = {
        'state': {'type': 'str', 'default': 'present', 'choices': ['absent', 'present', 'stopped', 'restarted']},
        'dependencies': {'type': 'bool', 'default': True},
        'pull': {'type': 'str', 'choices': ['always', 'missing', 'never', 'policy'], 'default': 'policy'},
        'build': {'type': 'str', 'choices': ['always', 'never', 'policy'], 'default': 'policy'},
        'recreate': {'type': 'str', 'default': 'auto', 'choices': ['always', 'never', 'auto']},
        'renew_anon_volumes': {'type': 'bool', 'default': False},
        'remove_images': {'type': 'str', 'choices': ['all', 'local']},
        'remove_volumes': {'type': 'bool', 'default': False},
        'remove_orphans': {'type': 'bool', 'default': False},
        'timeout': {'type': 'int'},
        'services': {'type': 'list', 'elements': 'str'},
        'scale': {'type': 'dict'},
        'wait': {'type': 'bool', 'default': False},
        'wait_timeout': {'type': 'int'},
        'ignore_build_events': {'type': 'bool', 'default': True},
        'assume_yes': {'type': 'bool', 'default': False},
    }
    # Merge in the options shared by all compose_v2 modules (project_src, etc.).
    extra_args = common_compose_argspec_ex()
    argument_spec.update(extra_args.pop('argspec'))

    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        needs_api_version=False,
        **extra_args
    )

    try:
        manager = ServicesManager(client)
        result = manager.run()
        manager.cleanup()
        client.module.exit_json(**result)
    except DockerException as exc:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,303 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_compose_v2_exec
short_description: Run command in a container of a Compose service
version_added: 3.13.0
description:
- Uses Docker Compose to run a command in a service's container.
- This can be used to run one-off commands in an existing service's container, and encapsulates C(docker compose exec).
extends_documentation_fragment:
- community.docker.compose_v2
- community.docker.compose_v2.minimum_version
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: N/A
details:
- Whether the executed command is idempotent depends on the command.
options:
service:
description:
- The service to run the command in.
type: str
required: true
index:
description:
- The index of the container to run the command in if the service has multiple replicas.
type: int
argv:
type: list
elements: str
description:
- The command to execute.
- Since this is a list of arguments, no quoting is needed.
- Exactly one of O(argv) or O(command) must be specified.
command:
type: str
description:
- The command to execute.
- Exactly one of O(argv) or O(command) must be specified.
chdir:
type: str
description:
- The directory to run the command in.
detach:
description:
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
type: bool
default: false
user:
type: str
description:
- If specified, the user to execute this command with.
stdin:
type: str
description:
- Set the stdin of the command directly to the specified value.
- Can only be used if O(detach=false).
stdin_add_newline:
type: bool
default: true
description:
- If set to V(true), appends a newline to O(stdin).
strip_empty_ends:
type: bool
default: true
description:
- Strip empty lines from the end of stdout/stderr in result.
privileged:
type: bool
default: false
description:
- Whether to give extended privileges to the process.
tty:
type: bool
default: true
description:
- Whether to allocate a TTY.
env:
description:
- Dictionary of environment variables with their respective values to be passed to the command ran inside the container.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
in order to avoid data loss.
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
to prevent Ansible to convert strings such as V("true") back to booleans. The correct way is to use V("{{ value |
string }}").
type: dict
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_compose_v2
notes:
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
or other shell features such as redirects.
"""
EXAMPLES = r"""
- name: Run a simple command (command)
community.docker.docker_compose_v2_exec:
service: foo
command: /bin/bash -c "ls -lah"
chdir: /root
register: result
- name: Print stdout
ansible.builtin.debug:
var: result.stdout
- name: Run a simple command (argv)
community.docker.docker_compose_v2_exec:
service: foo
argv:
- /bin/bash
- "-c"
- "ls -lah > /dev/stderr"
chdir: /root
register: result
- name: Print stderr lines
ansible.builtin.debug:
var: result.stderr_lines
"""
RETURN = r"""
stdout:
type: str
returned: success and O(detach=false)
description:
- The standard output of the container command.
stderr:
type: str
returned: success and O(detach=false)
description:
- The standard error output of the container command.
rc:
type: int
returned: success and O(detach=false)
sample: 0
description:
- The exit code of the command.
"""
import shlex
import traceback
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.six import string_types
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
from ansible_collections.community.docker.plugins.module_utils.compose_v2 import (
BaseComposeManager,
common_compose_argspec_ex,
)
class ExecManager(BaseComposeManager):
    # Runs a one-off command in a container of a Compose service via
    # `docker compose exec`, translating module parameters into CLI arguments.

    def __init__(self, client):
        """Read and validate module parameters from *client*.

        Fails the module when detach=true is combined with stdin, or when an
        environment variable value is not a string.
        """
        super(ExecManager, self).__init__(client)
        parameters = self.client.module.params
        self.service = parameters['service']
        self.index = parameters['index']
        self.chdir = parameters['chdir']
        self.detach = parameters['detach']
        self.user = parameters['user']
        self.stdin = parameters['stdin']
        self.strip_empty_ends = parameters['strip_empty_ends']
        self.privileged = parameters['privileged']
        self.tty = parameters['tty']
        self.env = parameters['env']

        # command and argv are alternatives; a command string is tokenized here.
        self.argv = parameters['argv']
        if parameters['command'] is not None:
            self.argv = shlex.split(parameters['command'])

        if self.detach and self.stdin is not None:
            # BUGFIX: this previously called the non-existent self.mail(), which
            # would have crashed with AttributeError instead of reporting a
            # proper module failure. self.fail() is the error path used
            # everywhere else in this class.
            self.fail('If detach=true, stdin cannot be provided.')

        if self.stdin is not None and parameters['stdin_add_newline']:
            self.stdin += '\n'

        if self.env is not None:
            for name, value in list(self.env.items()):
                if not isinstance(value, string_types):
                    self.fail(
                        "Non-string value found for env option. Ambiguous env options must be "
                        "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )
                    )
                self.env[name] = to_text(value, errors='surrogate_or_strict')

    def get_exec_cmd(self, dry_run, no_start=False):
        """Assemble the full `docker compose exec` command line as an argument list."""
        args = self.get_base_args(plain_progress=True) + ['exec']
        if self.index is not None:
            args.extend(['--index', str(self.index)])
        if self.chdir is not None:
            args.extend(['--workdir', self.chdir])
        if self.detach:
            args.extend(['--detach'])
        if self.user is not None:
            args.extend(['--user', self.user])
        if self.privileged:
            args.append('--privileged')
        if not self.tty:
            args.append('--no-TTY')
        if self.env:
            for name, value in list(self.env.items()):
                args.append('--env')
                args.append('{0}={1}'.format(name, value))
        # '--' separates options from the service name and the command to run.
        args.append('--')
        args.append(self.service)
        args.extend(self.argv)
        return args

    def run(self):
        """Execute the command and return stdout/stderr/rc (empty dict if detached)."""
        args = self.get_exec_cmd(self.check_mode)
        kwargs = {
            'cwd': self.project_src,
        }
        if self.stdin is not None:
            kwargs['data'] = self.stdin.encode('utf-8')
        if self.detach:
            # In detached mode there is no output to collect; a non-zero rc
            # should fail the module directly.
            kwargs['check_rc'] = True
        rc, stdout, stderr = self.client.call_cli(*args, **kwargs)
        if self.detach:
            return {}
        stdout = to_text(stdout)
        stderr = to_text(stderr)
        if self.strip_empty_ends:
            # Strip trailing blank lines only; leading whitespace is preserved.
            stdout = stdout.rstrip('\r\n')
            stderr = stderr.rstrip('\r\n')
        return {
            'changed': True,
            'rc': rc,
            'stdout': stdout,
            'stderr': stderr,
        }
def main():
    """Module entry point: build the argument spec, run the exec manager, exit JSON."""
    argument_spec = {
        'service': {'type': 'str', 'required': True},
        'index': {'type': 'int'},
        'argv': {'type': 'list', 'elements': 'str'},
        'command': {'type': 'str'},
        'chdir': {'type': 'str'},
        'detach': {'type': 'bool', 'default': False},
        'user': {'type': 'str'},
        'stdin': {'type': 'str'},
        'stdin_add_newline': {'type': 'bool', 'default': True},
        'strip_empty_ends': {'type': 'bool', 'default': True},
        'privileged': {'type': 'bool', 'default': False},
        'tty': {'type': 'bool', 'default': True},
        'env': {'type': 'dict'},
    }
    # Merge in the options shared by all compose_v2 modules (project_src, etc.).
    extra_args = common_compose_argspec_ex()
    argument_spec.update(extra_args.pop('argspec'))

    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=False,
        needs_api_version=False,
        **extra_args
    )

    try:
        manager = ExecManager(client)
        result = manager.run()
        manager.cleanup()
        client.module.exit_json(**result)
    except DockerException as exc:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,203 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_compose_v2_pull
short_description: Pull a Docker compose project
version_added: 3.6.0
description:
- Uses Docker Compose to pull images for a project.
extends_documentation_fragment:
- community.docker.compose_v2
- community.docker.compose_v2.minimum_version
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
details:
- If O(policy=always), the module will always indicate a change. Docker Compose does not give any information whether
pulling would update the image or not.
diff_mode:
support: none
idempotent:
support: full
options:
policy:
description:
- Whether to pull images before running. This is used when C(docker compose up) is run.
- V(always) ensures that the images are always pulled, even when already present on the Docker daemon.
- V(missing) only pulls them when they are not present on the Docker daemon. This is only supported since Docker Compose
2.22.0.
type: str
choices:
- always
- missing
default: always
ignore_buildable:
description:
- If set to V(true), will not pull images that can be built.
type: bool
default: false
version_added: 3.12.0
include_deps:
description:
- If set to V(true), also pull services that are declared as dependencies.
- This only makes sense if O(services) is used.
type: bool
default: false
version_added: 3.12.0
services:
description:
- Specifies a subset of services to be targeted.
type: list
elements: str
version_added: 3.12.0
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_compose_v2
"""
EXAMPLES = r"""
- name: Pull images for flask project
community.docker.docker_compose_v2_pull:
project_src: /path/to/flask
"""
RETURN = r"""
actions:
description:
- A list of actions that have been applied.
returned: success
type: list
elements: dict
contains:
what:
description:
- What kind of resource was changed.
type: str
sample: container
choices:
- image
- unknown
id:
description:
- The ID of the resource that was changed.
type: str
sample: container
status:
description:
- The status change that happened.
type: str
sample: Pulling
choices:
- Pulling
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
from ansible_collections.community.docker.plugins.module_utils.compose_v2 import (
BaseComposeManager,
common_compose_argspec_ex,
)
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
class PullManager(BaseComposeManager):
    """Implements ``docker compose pull`` for the pull module."""

    def __init__(self, client):
        super(PullManager, self).__init__(client)
        params = self.client.module.params
        self.policy = params['policy']
        self.ignore_buildable = params['ignore_buildable']
        self.include_deps = params['include_deps']
        self.services = params['services'] or []
        # https://github.com/docker/compose/pull/10981 - 2.22.0
        if self.policy != 'always' and self.compose_version < LooseVersion('2.22.0'):
            self.fail('A pull policy other than always is only supported since Docker Compose 2.22.0. {0} has version {1}'.format(
                self.client.get_cli(), self.compose_version))
        # https://github.com/docker/compose/pull/10134 - 2.15.0
        if self.ignore_buildable and self.compose_version < LooseVersion('2.15.0'):
            self.fail('--ignore-buildable is only supported since Docker Compose 2.15.0. {0} has version {1}'.format(
                self.client.get_cli(), self.compose_version))

    def get_pull_cmd(self, dry_run, no_start=False):
        """Assemble the CLI argument list for ``docker compose pull``.

        ``no_start`` is accepted for interface compatibility and unused.
        """
        cmd = self.get_base_args() + ['pull']
        if self.policy != 'always':
            cmd += ['--policy', self.policy]
        for flag, enabled in (
                ('--ignore-buildable', self.ignore_buildable),
                ('--include-deps', self.include_deps),
                ('--dry-run', dry_run)):
            if enabled:
                cmd.append(flag)
        cmd.append('--')
        cmd.extend(self.services)
        return cmd

    def run(self):
        """Run the pull and translate CLI events into module results."""
        result = {}
        cmd = self.get_pull_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*cmd, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr, ignore_service_pull_events=self.policy != 'missing' and not self.check_mode)
        self.update_failed(result, events, cmd, stdout, stderr, rc)
        self.cleanup_result(result)
        return result
def main():
    """Entry point for the docker_compose_v2_pull module."""
    argument_spec = {
        'policy': dict(type='str', choices=['always', 'missing'], default='always'),
        'ignore_buildable': dict(type='bool', default=False),
        'include_deps': dict(type='bool', default=False),
        'services': dict(type='list', elements='str'),
    }
    # Merge the shared compose options; the remaining keys of the dict are
    # extra keyword arguments for the client constructor.
    extra_spec = common_compose_argspec_ex()
    argument_spec.update(extra_spec.pop('argspec'))
    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        needs_api_version=False,
        **extra_spec
    )
    try:
        manager = PullManager(client)
        outcome = manager.run()
        manager.cleanup()
        client.module.exit_json(**outcome)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,436 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_compose_v2_run
short_description: Run command in a new container of a Compose service
version_added: 3.13.0
description:
- Uses Docker Compose to run a command in a new container for a service.
- This encapsulates C(docker compose run).
extends_documentation_fragment:
- community.docker.compose_v2
- community.docker.compose_v2.minimum_version
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: N/A
details:
- Whether the executed command is idempotent depends on the command.
options:
service:
description:
- The service to run the command in.
type: str
required: true
argv:
type: list
elements: str
description:
- The command to execute.
- Since this is a list of arguments, no quoting is needed.
- O(argv) or O(command) are mutually exclusive.
command:
type: str
description:
- The command to execute.
- O(argv) or O(command) are mutually exclusive.
build:
description:
- Build image before starting container.
- Note that building can insert information into RV(stdout) or RV(stderr).
type: bool
default: false
cap_add:
description:
- Linux capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- Linux capabilities to drop from the container.
type: list
elements: str
entrypoint:
description:
- Override the entrypoint of the container image.
type: str
interactive:
description:
- Whether to keep STDIN open even if not attached.
type: bool
default: true
labels:
description:
- Add or override labels to the container.
type: list
elements: str
name:
description:
- Assign a name to the container.
type: str
no_deps:
description:
- Do not start linked services.
type: bool
default: false
publish:
description:
- Publish a container's port(s) to the host.
type: list
elements: str
quiet_pull:
description:
- Pull without printing progress information.
- Note that pulling can insert information into RV(stdout) or RV(stderr).
type: bool
default: false
remove_orphans:
description:
- Remove containers for services not defined in the Compose file.
type: bool
default: false
cleanup:
description:
- Automatically remove the container when it exits.
- Corresponds to the C(--rm) option of C(docker compose run).
type: bool
default: false
service_ports:
description:
- Run command with all service's ports enabled and mapped to the host.
type: bool
default: false
use_aliases:
description:
- Use the service's network C(useAliases) in the network(s) the container connects to.
type: bool
default: false
volumes:
description:
- Bind mount one or more volumes.
type: list
elements: str
chdir:
type: str
description:
- The directory to run the command in.
detach:
description:
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
Instead, the return value RV(container_id) is provided.
type: bool
default: false
user:
type: str
description:
- If specified, the user to execute this command with.
stdin:
type: str
description:
- Set the stdin of the command directly to the specified value.
- Can only be used if O(detach=false).
stdin_add_newline:
type: bool
default: true
description:
- If set to V(true), appends a newline to O(stdin).
strip_empty_ends:
type: bool
default: true
description:
- Strip empty lines from the end of stdout/stderr in result.
tty:
type: bool
default: true
description:
- Whether to allocate a TTY.
env:
description:
- Dictionary of environment variables with their respective values to be passed to the command ran inside the container.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
in order to avoid data loss.
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
to prevent Ansible from converting strings such as V("true") back to booleans. The correct way is to use V("{{ value |
string }}").
type: dict
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_compose_v2
notes:
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
or other shell features such as redirects.
"""
EXAMPLES = r"""
- name: Run a simple command (command)
community.docker.docker_compose_v2_run:
service: foo
command: /bin/bash -c "ls -lah"
chdir: /root
register: result
- name: Print stdout
ansible.builtin.debug:
var: result.stdout
- name: Run a simple command (argv)
community.docker.docker_compose_v2_run:
service: foo
argv:
- /bin/bash
- "-c"
- "ls -lah > /dev/stderr"
chdir: /root
register: result
- name: Print stderr lines
ansible.builtin.debug:
var: result.stderr_lines
"""
RETURN = r"""
container_id:
type: str
returned: success and O(detach=true)
description:
- The ID of the created container.
stdout:
type: str
returned: success and O(detach=false)
description:
- The standard output of the container command.
stderr:
type: str
returned: success and O(detach=false)
description:
- The standard error output of the container command.
rc:
type: int
returned: success and O(detach=false)
sample: 0
description:
- The exit code of the command.
"""
import shlex
import traceback
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.six import string_types
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
from ansible_collections.community.docker.plugins.module_utils.compose_v2 import (
BaseComposeManager,
common_compose_argspec_ex,
)
class ExecManager(BaseComposeManager):
    """Runs a one-off command in a new container of a Compose service
    via ``docker compose run``."""

    def __init__(self, client):
        super(ExecManager, self).__init__(client)
        parameters = self.client.module.params

        self.service = parameters['service']
        self.build = parameters['build']
        self.cap_add = parameters['cap_add']
        self.cap_drop = parameters['cap_drop']
        self.entrypoint = parameters['entrypoint']
        self.interactive = parameters['interactive']
        self.labels = parameters['labels']
        self.name = parameters['name']
        self.no_deps = parameters['no_deps']
        self.publish = parameters['publish']
        self.quiet_pull = parameters['quiet_pull']
        self.remove_orphans = parameters['remove_orphans']
        self.do_cleanup = parameters['cleanup']
        self.service_ports = parameters['service_ports']
        self.use_aliases = parameters['use_aliases']
        self.volumes = parameters['volumes']
        self.chdir = parameters['chdir']
        self.detach = parameters['detach']
        self.user = parameters['user']
        self.stdin = parameters['stdin']
        self.strip_empty_ends = parameters['strip_empty_ends']
        self.tty = parameters['tty']
        self.env = parameters['env']

        self.argv = parameters['argv']
        if parameters['command'] is not None:
            # command is a single shell-like string; convert it to an argv list
            self.argv = shlex.split(parameters['command'])

        if self.detach and self.stdin is not None:
            # Bug fix: this previously called the non-existent self.mail(),
            # which crashed with AttributeError instead of failing cleanly.
            self.fail('If detach=true, stdin cannot be provided.')

        if self.stdin is not None and parameters['stdin_add_newline']:
            self.stdin += '\n'

        if self.env is not None:
            for name, value in list(self.env.items()):
                if not isinstance(value, string_types):
                    self.fail(
                        "Non-string value found for env option. Ambiguous env options must be "
                        "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, )
                    )
                self.env[name] = to_text(value, errors='surrogate_or_strict')

    def get_run_cmd(self, dry_run, no_start=False):
        """Build the ``docker compose run`` argument list from the options.

        ``no_start`` is accepted for interface compatibility and unused.
        """
        args = self.get_base_args(plain_progress=True) + ['run']
        if self.build:
            args.append('--build')
        if self.cap_add:
            for cap in self.cap_add:
                args.extend(['--cap-add', cap])
        if self.cap_drop:
            for cap in self.cap_drop:
                args.extend(['--cap-drop', cap])
        if self.entrypoint is not None:
            args.extend(['--entrypoint', self.entrypoint])
        if not self.interactive:
            args.append('--no-interactive')
        if self.labels:
            for label in self.labels:
                args.extend(['--label', label])
        if self.name is not None:
            args.extend(['--name', self.name])
        if self.no_deps:
            args.append('--no-deps')
        if self.publish:
            for publish in self.publish:
                args.extend(['--publish', publish])
        if self.quiet_pull:
            args.append('--quiet-pull')
        if self.remove_orphans:
            args.append('--remove-orphans')
        if self.do_cleanup:
            # 'cleanup' module option corresponds to the CLI's --rm flag
            args.append('--rm')
        if self.service_ports:
            args.append('--service-ports')
        if self.use_aliases:
            args.append('--use-aliases')
        if self.volumes:
            for volume in self.volumes:
                args.extend(['--volume', volume])
        if self.chdir is not None:
            args.extend(['--workdir', self.chdir])
        if self.detach:
            args.extend(['--detach'])
        if self.user is not None:
            args.extend(['--user', self.user])
        if not self.tty:
            args.append('--no-TTY')
        if self.env:
            for name, value in list(self.env.items()):
                args.append('--env')
                args.append('{0}={1}'.format(name, value))
        # '--' separates options from the service name and its command
        args.append('--')
        args.append(self.service)
        if self.argv:
            args.extend(self.argv)
        return args

    def run(self):
        """Execute the command.

        Returns the container ID when detached, otherwise the return code
        plus (optionally end-stripped) stdout/stderr.
        """
        args = self.get_run_cmd(self.check_mode)
        kwargs = {
            'cwd': self.project_src,
        }
        if self.stdin is not None:
            kwargs['data'] = self.stdin.encode('utf-8')
        if self.detach:
            # No output to report back; fail hard on a nonzero rc.
            kwargs['check_rc'] = True
        rc, stdout, stderr = self.client.call_cli(*args, **kwargs)
        if self.detach:
            return {
                'container_id': stdout.strip(),
            }
        stdout = to_text(stdout)
        stderr = to_text(stderr)
        if self.strip_empty_ends:
            stdout = stdout.rstrip('\r\n')
            stderr = stderr.rstrip('\r\n')
        return {
            'changed': True,
            'rc': rc,
            'stdout': stdout,
            'stderr': stderr,
        }
def main():
    """Entry point for the docker_compose_v2_run module."""
    argument_spec = {
        'service': dict(type='str', required=True),
        'argv': dict(type='list', elements='str'),
        'command': dict(type='str'),
        'build': dict(type='bool', default=False),
        'cap_add': dict(type='list', elements='str'),
        'cap_drop': dict(type='list', elements='str'),
        'entrypoint': dict(type='str'),
        'interactive': dict(type='bool', default=True),
        'labels': dict(type='list', elements='str'),
        'name': dict(type='str'),
        'no_deps': dict(type='bool', default=False),
        'publish': dict(type='list', elements='str'),
        'quiet_pull': dict(type='bool', default=False),
        'remove_orphans': dict(type='bool', default=False),
        'cleanup': dict(type='bool', default=False),
        'service_ports': dict(type='bool', default=False),
        'use_aliases': dict(type='bool', default=False),
        'volumes': dict(type='list', elements='str'),
        'chdir': dict(type='str'),
        'detach': dict(type='bool', default=False),
        'user': dict(type='str'),
        'stdin': dict(type='str'),
        'stdin_add_newline': dict(type='bool', default=True),
        'strip_empty_ends': dict(type='bool', default=True),
        'tty': dict(type='bool', default=True),
        'env': dict(type='dict'),
    }
    # Merge the shared compose options; the remaining keys of the dict are
    # extra keyword arguments for the client constructor.
    extra_spec = common_compose_argspec_ex()
    argument_spec.update(extra_spec.pop('argspec'))
    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=False,
        needs_api_version=False,
        **extra_spec
    )
    try:
        manager = ExecManager(client)
        outcome = manager.run()
        manager.cleanup()
        client.module.exit_json(**outcome)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,436 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_config
short_description: Manage docker configs
description:
- Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
- Adds to the metadata of new configs C(ansible_key), an encrypted hash representation of the data, which is then used in
future runs to test if a config has changed. If C(ansible_key) is not present, then a config will not be updated unless
the O(force) option is set.
- Updates to configs are performed by removing the config and creating it again.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_2_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: partial
details:
- If O(force=true) the module is not idempotent.
options:
data:
description:
- The value of the config.
- Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present).
type: str
data_is_b64:
description:
- If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used.
- To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option.
type: bool
default: false
data_src:
description:
- The file on the target from which to read the config.
- Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present).
type: path
version_added: 1.10.0
labels:
description:
- A map of key:value meta data, where both the C(key) and C(value) are expected to be a string.
- If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating
it again.
type: dict
force:
description:
- Use with O(state=present) to always remove and recreate an existing config.
- If V(true), an existing config will be replaced, even if it has not been changed.
type: bool
default: false
rolling_versions:
description:
- If set to V(true), configs are created with an increasing version number appended to their name.
- Adds a label containing the version number to the managed configs with the name C(ansible_version).
type: bool
default: false
version_added: 2.2.0
versions_to_keep:
description:
- When using O(rolling_versions), the number of old versions of the config to keep.
- Extraneous old configs are deleted after the new one is created.
- Set to V(-1) to keep everything or V(0) or V(1) to keep only the current one.
type: int
default: 5
version_added: 2.2.0
name:
description:
- The name of the config.
type: str
required: true
state:
description:
- Set to V(present), if the config should exist, and V(absent), if it should not.
type: str
default: present
choices:
- absent
- present
template_driver:
description:
- Set to V(golang) to use a Go template in O(data) or a Go template file in O(data_src).
type: str
choices:
- golang
version_added: 2.5.0
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
- "Docker API >= 1.30"
author:
- Chris Houseknecht (@chouseknecht)
- John Hu (@ushuz)
"""
EXAMPLES = r"""
- name: Create config foo (from a file on the control machine)
community.docker.docker_config:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
data_is_b64: true
state: present
- name: Create config foo (from a file on the target machine)
community.docker.docker_config:
name: foo
data_src: /path/to/config/file
state: present
- name: Change the config data
community.docker.docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
community.docker.docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the config
two: '2'
state: present
- name: No change
community.docker.docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing config
state: present
- name: Update an existing label
community.docker.docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the config
one: '1'
state: present
- name: Force the (re-)creation of the config
community.docker.docker_config:
name: foo
data: Goodnight everyone!
force: true
state: present
- name: Remove config foo
community.docker.docker_config:
name: foo
state: absent
"""
RETURN = r"""
config_id:
description:
- The ID assigned by Docker to the config object.
returned: success and O(state=present)
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
config_name:
description:
- The name of the created config object.
returned: success and O(state=present)
type: str
sample: 'awesome_config'
version_added: 2.2.0
"""
import base64
import hashlib
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils.common import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
compare_generic,
sanitize_labels,
)
from ansible.module_utils.common.text.converters import to_native, to_bytes
class ConfigManager(DockerBaseClass):
    # Orchestrates creation and removal of a single Docker (Swarm) config.
    # Idempotency is implemented via the 'ansible_key' label, which stores a
    # hash of the config data; with rolling_versions enabled, configs are
    # named '<name>_v<N>' and carry an 'ansible_version' label.

    def __init__(self, client, results):
        """Read and normalize the module parameters.

        ``results`` is the shared result dict that the caller finally passes
        to ``exit_json``; this class mutates it in place.
        """
        super(ConfigManager, self).__init__()
        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params
        self.name = parameters.get('name')
        self.state = parameters.get('state')
        self.data = parameters.get('data')
        if self.data is not None:
            # Normalize the data to bytes; base64-decode it if requested.
            if parameters.get('data_is_b64'):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)
        data_src = parameters.get('data_src')
        if data_src is not None:
            # data and data_src are mutually exclusive (enforced in main()),
            # so reading the file here cannot overwrite user-provided data.
            try:
                with open(data_src, 'rb') as f:
                    self.data = f.read()
            except Exception as exc:
                self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
        self.labels = parameters.get('labels')
        self.force = parameters.get('force')
        self.rolling_versions = parameters.get('rolling_versions')
        self.versions_to_keep = parameters.get('versions_to_keep')
        self.template_driver = parameters.get('template_driver')
        if self.rolling_versions:
            # Incremented/overwritten once the current version is known.
            self.version = 0
        # Hash of the data; computed in __call__ when state == 'present'.
        self.data_key = None
        # Matching existing configs, sorted oldest-first when rolling.
        self.configs = []

    def __call__(self):
        """Run the requested state transition."""
        self.get_config()
        if self.state == 'present':
            # Hash used for the idempotency label on created configs.
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()
            self.remove_old_versions()
        elif self.state == 'absent':
            self.absent()

    def get_version(self, config):
        """Return the numeric 'ansible_version' label of ``config`` (0 if unset/invalid)."""
        try:
            return int(config.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
        except ValueError:
            return 0

    def remove_old_versions(self):
        """Prune old rolling versions, keeping the newest ``versions_to_keep``.

        A negative ``versions_to_keep`` keeps everything; 0 and 1 both keep
        only the current config (hence the max(..., 1) below).
        """
        if not self.rolling_versions or self.versions_to_keep < 0:
            return
        if not self.check_mode:
            while len(self.configs) > max(self.versions_to_keep, 1):
                # self.configs is sorted oldest-first, so pop(0) is oldest.
                self.remove_config(self.configs.pop(0))

    def get_config(self):
        ''' Find an existing config. '''
        try:
            configs = self.client.configs(filters={'name': self.name})
        except APIError as exc:
            self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
        if self.rolling_versions:
            # The name filter is a substring match; narrow down to our
            # versioned naming scheme and sort oldest-first by version.
            self.configs = [
                config
                for config in configs
                if config['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
            ]
            self.configs.sort(key=self.get_version)
        else:
            # Exact-name match only.
            self.configs = [
                config for config in configs if config['Spec']['Name'] == self.name
            ]

    def create_config(self):
        ''' Create a new config '''
        config_id = None
        # We cannot see the data after creation, so adding a label we can use for idempotency check
        labels = {
            'ansible_key': self.data_key
        }
        if self.rolling_versions:
            # NOTE: this rewrites self.name to the versioned form; later code
            # (e.g. present()) relies on reading the updated name.
            self.version += 1
            labels['ansible_version'] = str(self.version)
            self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
        if self.labels:
            labels.update(self.labels)
        try:
            if not self.check_mode:
                # only use templating argument when self.template_driver is defined
                kwargs = {}
                if self.template_driver:
                    kwargs['templating'] = {
                        'name': self.template_driver
                    }
                config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs)
                self.configs += self.client.configs(filters={'id': config_id})
        except APIError as exc:
            self.client.fail("Error creating config: %s" % to_native(exc))
        # Some SDK versions return {'ID': ...} instead of a plain ID string.
        if isinstance(config_id, dict):
            config_id = config_id['ID']
        return config_id

    def remove_config(self, config):
        """Remove a single config (no-op in check mode)."""
        try:
            if not self.check_mode:
                self.client.remove_config(config['ID'])
        except APIError as exc:
            self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc)))

    def present(self):
        ''' Handles state == 'present', creating or updating the config '''
        if self.configs:
            # Compare against the newest existing config.
            config = self.configs[-1]
            self.results['config_id'] = config['ID']
            self.results['config_name'] = config['Spec']['Name']
            data_changed = False
            template_driver_changed = False
            attrs = config.get('Spec', {})
            if attrs.get('Labels', {}).get('ansible_key'):
                if attrs['Labels']['ansible_key'] != self.data_key:
                    data_changed = True
            else:
                # Without the idempotency label we cannot detect data changes.
                if not self.force:
                    self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'")
            # template_driver has changed if it was set in the previous config
            # and now it differs, or if it was not set but now it is.
            if attrs.get('Templating', {}).get('Name'):
                if attrs['Templating']['Name'] != self.template_driver:
                    template_driver_changed = True
            elif self.template_driver:
                template_driver_changed = True
            labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
            if self.rolling_versions:
                self.version = self.get_version(config)
            if data_changed or template_driver_changed or labels_changed or self.force:
                # if something changed or force, delete and re-create the config
                # (rolling mode keeps the old version; pruning happens later)
                if not self.rolling_versions:
                    self.absent()
                config_id = self.create_config()
                self.results['changed'] = True
                self.results['config_id'] = config_id
                self.results['config_name'] = self.name
        else:
            # No existing config: create it.
            self.results['changed'] = True
            self.results['config_id'] = self.create_config()
            self.results['config_name'] = self.name

    def absent(self):
        ''' Handles state == 'absent', removing the config '''
        if self.configs:
            for config in self.configs:
                self.remove_config(config)
            self.results['changed'] = True
def main():
    """Entry point: wire up the Ansible client and run ConfigManager."""
    argument_spec = {
        'name': dict(type='str', required=True),
        'state': dict(type='str', default='present', choices=['absent', 'present']),
        'data': dict(type='str'),
        'data_is_b64': dict(type='bool', default=False),
        'data_src': dict(type='path'),
        'labels': dict(type='dict'),
        'force': dict(type='bool', default=False),
        'rolling_versions': dict(type='bool', default=False),
        'versions_to_keep': dict(type='int', default=5),
        'template_driver': dict(type='str', choices=['golang']),
    }
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Exactly one of data/data_src must be given when state=present.
        required_if=[
            ('state', 'present', ['data', 'data_src'], True),
        ],
        mutually_exclusive=[
            ('data', 'data_src'),
        ],
        min_docker_version='2.6.0',
        min_docker_api_version='1.30',
        option_minimal_versions=dict(
            template_driver=dict(docker_py_version='5.0.3', docker_api_version='1.37'),
        ),
    )
    sanitize_labels(client.module.params['labels'], 'labels', client)
    try:
        results = dict(changed=False)
        ConfigManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,871 @@
#!/usr/bin/python
#
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_container_copy_into
short_description: Copy a file into a Docker container
version_added: 3.4.0
description:
- Copy a file into a Docker container.
- Similar to C(docker cp).
- To copy files in a non-running container, you must provide the O(owner_id) and O(group_id) options. This is also necessary
if the container does not contain a C(/bin/sh) shell with an C(id) tool.
attributes:
check_mode:
support: full
diff_mode:
support: full
details:
- Additional data will need to be transferred to compute diffs.
- The module uses R(the MAX_FILE_SIZE_FOR_DIFF ansible-core configuration,MAX_FILE_SIZE_FOR_DIFF) to determine for how
large files diffs should be computed.
idempotent:
support: partial
details:
- If O(force=true) the module is not idempotent.
options:
container:
description:
- The name of the container to copy files to.
type: str
required: true
path:
description:
- Path to a file on the managed node.
- Mutually exclusive with O(content). One of O(content) and O(path) is required.
type: path
content:
description:
- The file's content.
- If you plan to provide binary data, provide it pre-encoded to base64, and set O(content_is_b64=true).
- Mutually exclusive with O(path). One of O(content) and O(path) is required.
type: str
content_is_b64:
description:
- If set to V(true), the content in O(content) is assumed to be Base64 encoded and will be decoded before being used.
- To use binary O(content), it is better to keep it Base64 encoded and let it be decoded by this option. Otherwise you
risk the data to be interpreted as UTF-8 and corrupted.
type: bool
default: false
container_path:
description:
- Path to a file inside the Docker container.
- Must be an absolute path.
type: str
required: true
follow:
description:
- This flag indicates that filesystem links in the Docker container, if they exist, should be followed.
type: bool
default: false
local_follow:
description:
- This flag indicates that filesystem links in the source tree (where the module is executed), if they exist, should
be followed.
type: bool
default: true
owner_id:
description:
- The owner ID to use when writing the file to disk.
- If provided, O(group_id) must also be provided.
- If not provided, the module will try to determine the user and group ID for the current user in the container. This
will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available. Also
the container must be running.
type: int
group_id:
description:
- The group ID to use when writing the file to disk.
- If provided, O(owner_id) must also be provided.
- If not provided, the module will try to determine the user and group ID for the current user in the container. This
will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available. Also
the container must be running.
type: int
mode:
description:
- The file mode to use when writing the file to disk.
- Will use the file's mode from the source system if this option is not provided.
type: int
force:
description:
- If set to V(true), force writing the file (without performing any idempotency checks).
- If set to V(false), only write the file if it does not exist on the target. If a filesystem object exists at the destination,
the module will not do any change.
- If this option is not specified, the module will be idempotent. To verify idempotency, it will try to get information
on the filesystem object in the container, and if everything seems to match will download the file from the container
to compare it to the file to upload.
type: bool
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
author:
- "Felix Fontein (@felixfontein)"
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Copy a file into the container
community.docker.docker_container_copy_into:
container: mydata
path: /home/user/data.txt
container_path: /data/input.txt
- name: Copy a file into the container with owner, group, and mode set
community.docker.docker_container_copy_into:
container: mydata
path: /home/user/bin/runme.o
container_path: /bin/runme
owner_id: 0 # root
group_id: 0 # root
mode: 0755 # readable and executable by all users, writable by root
"""
RETURN = r"""
container_path:
description:
- The actual path in the container.
- Can only be different from O(container_path) when O(follow=true).
type: str
returned: success
"""
import base64
import io
import os
import stat
import traceback
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.copy import (
DockerFileCopyError,
DockerFileNotFound,
DockerUnexpectedError,
determine_user_group,
fetch_file_ex,
put_file,
put_file_content,
stat_file,
)
from ansible_collections.community.docker.plugins.module_utils._scramble import generate_insecure_key, scramble
def are_fileobjs_equal(f1, f2):
    '''Compare the contents of two (buffered) file objects for equality.

    Reads both files incrementally in chunks so that large files do not have
    to be held in memory completely.
    '''
    chunk_size = 65536
    buf1 = b''
    buf2 = b''
    while True:
        # Top up each buffer from its file until the file reaches EOF.
        if f1 is not None and len(buf1) < chunk_size:
            data = f1.read(chunk_size)
            if not data:
                # EOF: stop reading from this file
                f1 = None
            buf1 += data
        if f2 is not None and len(buf2) < chunk_size:
            data = f2.read(chunk_size)
            if not data:
                f2 = None
            buf2 += data
        if not buf1 or not buf2:
            # At least one side is fully consumed. The files are equal
            # exactly when both sides have no data left.
            return not buf1 and not buf2
        # Compare the common prefix of both buffers and drop it.
        common = min(len(buf1), len(buf2))
        if buf1[:common] != buf2[:common]:
            return False
        buf1 = buf1[common:]
        buf2 = buf2[common:]
def are_fileobjs_equal_read_first(f1, f2):
    '''Given two (buffered) file objects, compare their contents.

    Returns a tuple (is_equal, content_of_f1), where the first element indicates
    whether the two file objects have the same content, and the second element is
    the content of the first file object.

    Note that the full content of f1 is returned even if the comparison stops
    early because a difference was found.'''
    blocksize = 65536
    b1buf = b''
    b2buf = b''
    is_equal = True
    content = []
    while True:
        # Top up each buffer from its file; a file is set to None at EOF.
        if f1 and len(b1buf) < blocksize:
            f1b = f1.read(blocksize)
            if not f1b:
                # f1 is EOF, so stop reading from it
                f1 = None
            b1buf += f1b
        if f2 and len(b2buf) < blocksize:
            f2b = f2.read(blocksize)
            if not f2b:
                # f2 is EOF, so stop reading from it
                f2 = None
            b2buf += f2b
        if not b1buf or not b2buf:
            # At least one of f1 and f2 is EOF and all its data has
            # been processed. If both are EOF and their data has been
            # processed, the files are equal, otherwise not.
            is_equal = not b1buf and not b2buf
            break
        # Compare the next chunk of data, and remove it from the buffers
        buflen = min(len(b1buf), len(b2buf))
        if b1buf[:buflen] != b2buf[:buflen]:
            is_equal = False
            break
        content.append(b1buf[:buflen])
        b1buf = b1buf[buflen:]
        b2buf = b2buf[buflen:]
    # Collect the rest of f1's content: what is still buffered plus whatever
    # remains unread in the file object.
    content.append(b1buf)
    if f1:
        content.append(f1.read())
    return is_equal, b''.join(content)
def is_container_file_not_regular_file(container_stat):
    '''Return whether the mode in ``container_stat`` marks a non-regular file.

    The mode uses Go's os.FileMode bit layout (https://pkg.go.dev/io/fs#FileMode),
    where the top bits flag special filesystem object types.
    '''
    non_regular_bits = (
        32 - 1,   # ModeDir
        32 - 4,   # ModeTemporary
        32 - 5,   # ModeSymlink
        32 - 6,   # ModeDevice
        32 - 7,   # ModeNamedPipe
        32 - 8,   # ModeSocket
        32 - 11,  # ModeCharDevice
        32 - 13,  # ModeIrregular
    )
    mode = container_stat['mode']
    return any(mode & (1 << bit) for bit in non_regular_bits)
def get_container_file_mode(container_stat):
    '''Translate the Go os.FileMode value in ``container_stat`` to a POSIX mode.

    The lower 12 bits carry the regular permission bits unchanged; the
    setuid/setgid/sticky flags live in separate high bits in Go's encoding and
    are mapped back onto their POSIX positions here.
    '''
    go_mode = container_stat['mode']
    mode = go_mode & 0xFFF
    for go_bit, posix_bit in (
        (1 << (32 - 9), stat.S_ISUID),   # ModeSetuid
        (1 << (32 - 10), stat.S_ISGID),  # ModeSetgid
        (1 << (32 - 12), stat.S_ISVTX),  # ModeSticky
    ):
        if go_mode & go_bit:
            mode |= posix_bit
    return mode
def add_other_diff(diff, in_path, member):
    '''Record a textual "before" description for a non-regular tar member.

    Does nothing when ``diff`` is None (diff mode disabled). Raises
    DockerUnexpectedError when called with a regular file, since regular
    files must be handled by a different code path.
    '''
    if diff is None:
        return
    diff['before_header'] = in_path
    if member.isdir():
        description = '(directory)'
    elif member.issym() or member.islnk():
        # For links, show the link target instead of a type marker.
        description = member.linkname
    elif member.ischr():
        description = '(character device)'
    elif member.isblk():
        description = '(block device)'
    elif member.isfifo():
        description = '(fifo)'
    elif member.isdev():
        description = '(device)'
    elif member.isfile():
        raise DockerUnexpectedError('should not be a regular file')
    else:
        description = '(unknown filesystem object)'
    diff['before'] = description
def retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat=None, link_target=None):
    '''Fill in the "before" side of ``diff`` with the current state of ``container_path``.

    If ``regular_stat`` (a container stat result whose ``mode`` uses Go
    os.FileMode bit positions, see https://pkg.go.dev/io/fs#FileMode) is given,
    non-regular filesystem objects and oversized files are described without
    fetching data; otherwise the object is fetched from the container with
    ``fetch_file_ex``. Does nothing when ``diff`` is None (diff mode disabled).
    '''
    if diff is None:
        return
    if regular_stat is not None:
        # First handle all filesystem object types that are not regular files
        if regular_stat['mode'] & (1 << (32 - 1)) != 0:  # ModeDir
            diff['before_header'] = container_path
            diff['before'] = '(directory)'
            return
        elif regular_stat['mode'] & (1 << (32 - 4)) != 0:  # ModeTemporary
            diff['before_header'] = container_path
            diff['before'] = '(temporary file)'
            return
        elif regular_stat['mode'] & (1 << (32 - 5)) != 0:  # ModeSymlink
            diff['before_header'] = container_path
            diff['before'] = link_target
            return
        elif regular_stat['mode'] & (1 << (32 - 6)) != 0:  # ModeDevice
            diff['before_header'] = container_path
            diff['before'] = '(device)'
            return
        elif regular_stat['mode'] & (1 << (32 - 7)) != 0:  # ModeNamedPipe
            diff['before_header'] = container_path
            diff['before'] = '(named pipe)'
            return
        elif regular_stat['mode'] & (1 << (32 - 8)) != 0:  # ModeSocket
            diff['before_header'] = container_path
            diff['before'] = '(socket)'
            return
        elif regular_stat['mode'] & (1 << (32 - 11)) != 0:  # ModeCharDevice
            diff['before_header'] = container_path
            diff['before'] = '(character device)'
            return
        elif regular_stat['mode'] & (1 << (32 - 13)) != 0:  # ModeIrregular
            diff['before_header'] = container_path
            diff['before'] = '(unknown filesystem object)'
            return
        # Check whether file is too large
        if regular_stat['size'] > max_file_size_for_diff > 0:
            diff['dst_larger'] = max_file_size_for_diff
            return
    # We need to get hold of the content
    def process_none(in_path):
        # The path does not exist in the container
        diff['before'] = ''
    def process_regular(in_path, tar, member):
        add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
    def process_symlink(in_path, member):
        diff['before_header'] = in_path
        diff['before'] = member.linkname
    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)
    fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def is_binary(content):
    '''Heuristically determine whether the byte string ``content`` is binary.

    A NUL byte is treated as the indicator of binary data.
    '''
    # TODO: better detection
    # (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this is not too bad...)
    return b'\x00' in content
def are_fileobjs_equal_with_diff_of_first(f1, f2, size, diff, max_file_size_for_diff, container_path):
    '''Compare two file objects and record f1's content on the "before" side of ``diff``.

    Returns whether the two file objects have identical content. Falls back to
    a plain comparison (without recording content) when diff mode is disabled
    or the file exceeds ``max_file_size_for_diff``.
    '''
    if diff is None:
        return are_fileobjs_equal(f1, f2)
    if size > max_file_size_for_diff > 0:
        # Too large to include in the diff; only record that it was skipped
        diff['dst_larger'] = max_file_size_for_diff
        return are_fileobjs_equal(f1, f2)
    is_equal, content = are_fileobjs_equal_read_first(f1, f2)
    if is_binary(content):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(content)
    return is_equal
def add_diff_dst_from_regular_member(diff, max_file_size_for_diff, container_path, tar, member):
    '''Fill in the "before" side of ``diff`` from a regular-file tar member.

    Does nothing when ``diff`` is None. Oversized files are only flagged,
    binary files are marked instead of dumped.
    '''
    if diff is None:
        return
    if member.size > max_file_size_for_diff > 0:
        diff['dst_larger'] = max_file_size_for_diff
        return
    reader = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
    data = reader.read()
    if is_binary(data):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(data)
def copy_dst_to_src(diff):
    '''Mirror the destination ("before"/dst) side of ``diff`` onto the source side.

    For each dst key that is present, its value is copied to the matching src
    key; src keys without a dst counterpart are removed. Does nothing when
    ``diff`` is None.
    '''
    if diff is None:
        return
    key_pairs = (
        ('dst_size', 'src_size'),
        ('dst_binary', 'src_binary'),
        ('before_header', 'after_header'),
        ('before', 'after'),
    )
    for dst_key, src_key in key_pairs:
        if dst_key in diff:
            diff[src_key] = diff[dst_key]
        elif src_key in diff:
            # Remove stale source values that no longer have a counterpart
            del diff[src_key]
def is_file_idempotent(client, container, managed_path, container_path, follow_links, local_follow_links, owner_id, group_id, mode,
                       force=False, diff=None, max_file_size_for_diff=1):
    '''Check whether copying local file ``managed_path`` to ``container_path`` is a no-op.

    Returns a tuple (container_path, mode, idempotent): the (possibly
    symlink-resolved) path inside the container, the file mode that will be
    used, and whether the container already contains an identical file.

    Raises DockerFileNotFound if the local file does not exist, and
    DockerFileCopyError if it is neither a regular file nor a symbolic link.
    If ``diff`` is a dict, it is filled with before/after data for diff mode.
    '''
    # Retrieve information of local file
    try:
        file_stat = os.stat(managed_path) if local_follow_links else os.lstat(managed_path)
    except OSError as exc:
        if exc.errno == 2:  # ENOENT
            raise DockerFileNotFound('Cannot find local file {managed_path}'.format(managed_path=managed_path))
        raise
    if mode is None:
        # Default to the local file's permission bits
        mode = stat.S_IMODE(file_stat.st_mode)
    if not stat.S_ISLNK(file_stat.st_mode) and not stat.S_ISREG(file_stat.st_mode):
        # Bugfix: the placeholder was previously not interpolated, so the error
        # message contained the literal text '{managed_path}'.
        raise DockerFileCopyError('Local path {managed_path} is not a symbolic link or file'.format(managed_path=managed_path))
    if diff is not None:
        # Record the source ("after") side of the diff
        if file_stat.st_size > max_file_size_for_diff > 0:
            diff['src_larger'] = max_file_size_for_diff
        elif stat.S_ISLNK(file_stat.st_mode):
            diff['after_header'] = managed_path
            diff['after'] = os.readlink(managed_path)
        else:
            with open(managed_path, 'rb') as f:
                content = f.read()
            if is_binary(content):
                diff['src_binary'] = 1
            else:
                diff['after_header'] = managed_path
                diff['after'] = to_text(content)
    # When forcing and we are not following links in the container, go!
    if force and not follow_links:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
        return container_path, mode, False
    # Resolve symlinks in the container (if requested), and get information on container's file
    real_container_path, regular_stat, link_target = stat_file(
        client,
        container,
        in_path=container_path,
        follow_links=follow_links,
    )
    # Follow links in the Docker container?
    if follow_links:
        container_path = real_container_path
    # If the file was not found, continue
    if regular_stat is None:
        if diff is not None:
            diff['before_header'] = container_path
            diff['before'] = ''
        return container_path, mode, False
    # When forcing, go!
    if force:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    # If force is set to False, and the destination exists, assume there's nothing to do
    if force is False:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        copy_dst_to_src(diff)
        return container_path, mode, True
    # Basic idempotency checks
    if stat.S_ISLNK(file_stat.st_mode):
        # Local file is a symlink: the container file must be one too, with the same target
        if link_target is None:
            retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
            return container_path, mode, False
        local_link_target = os.readlink(managed_path)
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, local_link_target == link_target
    if link_target is not None:
        # Local file is regular, but the container file is a symlink
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if is_container_file_not_regular_file(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if file_stat.st_size != regular_stat['size']:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if mode != get_container_file_mode(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    # Metadata matches; fetch the file from the container and compare content

    def process_none(in_path):
        return container_path, mode, False

    def process_regular(in_path, tar, member):
        # Check things like user/group ID and mode
        if any([
            member.mode & 0xFFF != mode,
            member.uid != owner_id,
            member.gid != group_id,
            not stat.S_ISREG(file_stat.st_mode),
            member.size != file_stat.st_size,
        ]):
            add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
            return container_path, mode, False
        tar_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        with open(managed_path, 'rb') as local_f:
            is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path)
        return container_path, mode, is_equal

    def process_symlink(in_path, member):
        if diff is not None:
            diff['before_header'] = in_path
            diff['before'] = member.linkname
        # Check things like user/group ID and mode
        if member.mode & 0xFFF != mode:
            return container_path, mode, False
        if member.uid != owner_id:
            return container_path, mode, False
        if member.gid != group_id:
            return container_path, mode, False
        if not stat.S_ISLNK(file_stat.st_mode):
            return container_path, mode, False
        local_link_target = os.readlink(managed_path)
        return container_path, mode, member.linkname == local_link_target

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)
        return container_path, mode, False

    return fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def copy_file_into_container(client, container, managed_path, container_path, follow_links, local_follow_links,
                             owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
    '''Copy the local file ``managed_path`` into the container at ``container_path``.

    Runs an idempotency check first and only uploads when a change is needed
    (and the module is not in check mode). Exits the module via
    ``client.module.exit_json()``; never returns normally.
    '''
    if diff:
        # Turn the diff flag into a dict that collects before/after information
        diff = {}
    else:
        diff = None
    container_path, mode, idempotent = is_file_idempotent(
        client,
        container,
        managed_path,
        container_path,
        follow_links,
        local_follow_links,
        owner_id,
        group_id,
        mode,
        force=force,
        diff=diff,
        max_file_size_for_diff=max_file_size_for_diff,
    )
    changed = not idempotent
    if changed and not client.module.check_mode:
        put_file(
            client,
            container,
            in_path=managed_path,
            out_path=container_path,
            user_id=owner_id,
            group_id=group_id,
            mode=mode,
            follow_links=local_follow_links,
        )
    result = dict(
        container_path=container_path,
        changed=changed,
    )
    if diff:
        result['diff'] = diff
    client.module.exit_json(**result)
def is_content_idempotent(client, container, content, container_path, follow_links, owner_id, group_id, mode,
                          force=False, diff=None, max_file_size_for_diff=1):
    '''Check whether writing ``content`` (bytes) to ``container_path`` is a no-op.

    Returns a tuple (container_path, mode, idempotent). Mirrors
    is_file_idempotent(), but compares against an in-memory byte string
    instead of a local file.
    '''
    if diff is not None:
        # Record the source ("after") side of the diff
        if len(content) > max_file_size_for_diff > 0:
            diff['src_larger'] = max_file_size_for_diff
        elif is_binary(content):
            diff['src_binary'] = 1
        else:
            diff['after_header'] = 'dynamically generated'
            diff['after'] = to_text(content)
    # When forcing and we are not following links in the container, go!
    if force and not follow_links:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
        return container_path, mode, False
    # Resolve symlinks in the container (if requested), and get information on container's file
    real_container_path, regular_stat, link_target = stat_file(
        client,
        container,
        in_path=container_path,
        follow_links=follow_links,
    )
    # Follow links in the Docker container?
    if follow_links:
        container_path = real_container_path
    # If the file was not found, continue
    if regular_stat is None:
        if diff is not None:
            diff['before_header'] = container_path
            diff['before'] = ''
        return container_path, mode, False
    # When forcing, go!
    if force:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    # If force is set to False, and the destination exists, assume there's nothing to do
    if force is False:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        copy_dst_to_src(diff)
        return container_path, mode, True
    # Basic idempotency checks
    if link_target is not None:
        # Destination is a symlink, but a regular file would be written
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if is_container_file_not_regular_file(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if len(content) != regular_stat['size']:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if mode != get_container_file_mode(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    # Metadata matches; fetch the file from the container and compare content
    def process_none(in_path):
        if diff is not None:
            diff['before'] = ''
        return container_path, mode, False
    def process_regular(in_path, tar, member):
        # Check things like user/group ID and mode
        if any([
            member.mode & 0xFFF != mode,
            member.uid != owner_id,
            member.gid != group_id,
            member.size != len(content),
        ]):
            add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
            return container_path, mode, False
        tar_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, io.BytesIO(content), member.size, diff, max_file_size_for_diff, in_path)
        return container_path, mode, is_equal
    def process_symlink(in_path, member):
        if diff is not None:
            diff['before_header'] = in_path
            diff['before'] = member.linkname
        return container_path, mode, False
    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)
        return container_path, mode, False
    return fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
def copy_content_into_container(client, container, content, container_path, follow_links,
                                owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
    '''Write the byte string ``content`` to ``container_path`` in the container.

    Runs an idempotency check first and only uploads when a change is needed
    (and the module is not in check mode). Exits the module via
    ``client.module.exit_json()``; never returns normally.
    '''
    if diff:
        # Turn the diff flag into a dict that collects before/after information
        diff = {}
    else:
        diff = None
    container_path, mode, idempotent = is_content_idempotent(
        client,
        container,
        content,
        container_path,
        follow_links,
        owner_id,
        group_id,
        mode,
        force=force,
        diff=diff,
        max_file_size_for_diff=max_file_size_for_diff,
    )
    changed = not idempotent
    if changed and not client.module.check_mode:
        put_file_content(
            client,
            container,
            content=content,
            out_path=container_path,
            user_id=owner_id,
            group_id=group_id,
            mode=mode,
        )
    result = dict(
        container_path=container_path,
        changed=changed,
    )
    if diff:
        # Since the content is no_log, make sure that the before/after strings look sufficiently different
        key = generate_insecure_key()
        diff['scrambled_diff'] = base64.b64encode(key)
        for k in ('before', 'after'):
            if k in diff:
                diff[k] = scramble(diff[k], key)
        result['diff'] = diff
    client.module.exit_json(**result)
def main():
    '''Module entry point: copy a local file or inline content into a Docker container.'''
    argument_spec = dict(
        container=dict(type='str', required=True),
        path=dict(type='path'),
        container_path=dict(type='str', required=True),
        follow=dict(type='bool', default=False),
        local_follow=dict(type='bool', default=True),
        owner_id=dict(type='int'),
        group_id=dict(type='int'),
        mode=dict(type='int'),
        force=dict(type='bool'),
        content=dict(type='str', no_log=True),
        content_is_b64=dict(type='bool', default=False),
        # Undocumented parameters for use by the action plugin
        _max_file_size_for_diff=dict(type='int'),
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        # NOTE(review): the module documentation states "Docker API >= 1.25",
        # while 1.20 is required here -- confirm which value is intended.
        min_docker_api_version='1.20',
        supports_check_mode=True,
        mutually_exclusive=[('path', 'content')],
        required_together=[('owner_id', 'group_id')],
        required_by={
            'content': ['mode'],
        },
    )
    container = client.module.params['container']
    managed_path = client.module.params['path']
    container_path = client.module.params['container_path']
    follow = client.module.params['follow']
    local_follow = client.module.params['local_follow']
    owner_id = client.module.params['owner_id']
    group_id = client.module.params['group_id']
    mode = client.module.params['mode']
    force = client.module.params['force']
    content = client.module.params['content']
    max_file_size_for_diff = client.module.params['_max_file_size_for_diff'] or 1
    if content is not None:
        if client.module.params['content_is_b64']:
            try:
                content = base64.b64decode(content)
            except Exception as e:  # depending on Python version and error, multiple different exceptions can be raised
                client.fail('Cannot Base64 decode the content option: {0}'.format(e))
        else:
            content = to_bytes(content)
    # Make the container path absolute and normalized
    if not container_path.startswith(os.path.sep):
        container_path = os.path.join(os.path.sep, container_path)
    container_path = os.path.normpath(container_path)
    try:
        if owner_id is None or group_id is None:
            # Determine the default user/group inside the container
            owner_id, group_id = determine_user_group(client, container)
        if content is not None:
            copy_content_into_container(
                client,
                container,
                content,
                container_path,
                follow_links=follow,
                owner_id=owner_id,
                group_id=group_id,
                mode=mode,
                force=force,
                diff=client.module._diff,
                max_file_size_for_diff=max_file_size_for_diff,
            )
        elif managed_path is not None:
            copy_file_into_container(
                client,
                container,
                managed_path,
                container_path,
                follow_links=follow,
                local_follow_links=local_follow,
                owner_id=owner_id,
                group_id=group_id,
                mode=mode,
                force=force,
                diff=client.module._diff,
                max_file_size_for_diff=max_file_size_for_diff,
            )
        else:
            # Can happen if a user explicitly passes `content: null` or `path: null`...
            client.fail('One of path and content must be supplied')
    except NotFound as exc:
        client.fail('Could not find container "{1}" or resource in it ({0})'.format(exc, container))
    except APIError as exc:
        client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'.format(exc, container),
            exception=traceback.format_exc())
    except DockerUnexpectedError as exc:
        client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
    except DockerFileCopyError as exc:
        client.fail(to_native(exc))
    except OSError as exc:
        client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,314 @@
#!/usr/bin/python
#
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_container_exec
short_description: Execute command in a docker container
version_added: 1.5.0
description:
- Executes a command in a Docker container.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: N/A
details:
- Whether the executed command is idempotent depends on the command.
options:
container:
type: str
required: true
description:
- The name of the container to execute the command in.
argv:
type: list
elements: str
description:
- The command to execute.
- Since this is a list of arguments, no quoting is needed.
- Exactly one of O(argv) or O(command) must be specified.
command:
type: str
description:
- The command to execute.
- Exactly one of O(argv) or O(command) must be specified.
chdir:
type: str
description:
- The directory to run the command in.
detach:
description:
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
type: bool
default: false
version_added: 2.1.0
user:
type: str
description:
- If specified, the user to execute this command with.
stdin:
type: str
description:
- Set the stdin of the command directly to the specified value.
- Can only be used if O(detach=false).
stdin_add_newline:
type: bool
default: true
description:
- If set to V(true), appends a newline to O(stdin).
strip_empty_ends:
type: bool
default: true
description:
- Strip empty lines from the end of stdout/stderr in result.
tty:
type: bool
default: false
description:
- Whether to allocate a TTY.
env:
description:
- Dictionary of environment variables with their respective values to be passed to the command run inside the container.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
in order to avoid data loss.
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
to prevent Ansible from converting strings such as V("true") back to booleans. The correct way is to use V("{{ value |
string }}").
type: dict
version_added: 2.1.0
notes:
- Does B(not work with TCP TLS sockets) when using O(stdin). This is caused by the inability to send C(close_notify) without
closing the connection with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605)
for more information.
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
or other shell features such as redirects.
author:
- "Felix Fontein (@felixfontein)"
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Run a simple command (command)
community.docker.docker_container_exec:
container: foo
command: /bin/bash -c "ls -lah"
chdir: /root
register: result
- name: Print stdout
ansible.builtin.debug:
var: result.stdout
- name: Run a simple command (argv)
community.docker.docker_container_exec:
container: foo
argv:
- /bin/bash
- "-c"
- "ls -lah > /dev/stderr"
chdir: /root
register: result
- name: Print stderr lines
ansible.builtin.debug:
var: result.stderr_lines
"""
RETURN = r"""
stdout:
type: str
returned: success and O(detach=false)
description:
- The standard output of the container command.
stderr:
type: str
returned: success and O(detach=false)
description:
- The standard error output of the container command.
rc:
type: int
returned: success and O(detach=false)
sample: 0
description:
- The exit code of the command.
exec_id:
type: str
returned: success and O(detach=true)
sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
description:
- The execution ID of the command.
version_added: 2.1.0
"""
import shlex
import traceback
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
from ansible.module_utils.six import string_types
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.selectors import selectors
from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
DockerSocketHandlerModule,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
APIError,
DockerException,
NotFound,
)
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import format_environment
def main():
    '''Module entry point: execute a command inside a running Docker container.'''
    argument_spec = dict(
        container=dict(type='str', required=True),
        argv=dict(type='list', elements='str'),
        command=dict(type='str'),
        chdir=dict(type='str'),
        detach=dict(type='bool', default=False),
        user=dict(type='str'),
        stdin=dict(type='str'),
        stdin_add_newline=dict(type='bool', default=True),
        strip_empty_ends=dict(type='bool', default=True),
        tty=dict(type='bool', default=False),
        env=dict(type='dict'),
    )
    # chdir needs the WorkingDir field on exec create, which requires API 1.35
    option_minimal_versions = dict(
        chdir=dict(docker_api_version='1.35'),
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        option_minimal_versions=option_minimal_versions,
        mutually_exclusive=[('argv', 'command')],
        required_one_of=[('argv', 'command')],
    )
    container = client.module.params['container']
    argv = client.module.params['argv']
    command = client.module.params['command']
    chdir = client.module.params['chdir']
    detach = client.module.params['detach']
    user = client.module.params['user']
    stdin = client.module.params['stdin']
    strip_empty_ends = client.module.params['strip_empty_ends']
    tty = client.module.params['tty']
    env = client.module.params['env']
    if env is not None:
        for name, value in list(env.items()):
            # Reject non-string values: YAML may already have converted them,
            # silently changing what would be passed to the command
            if not isinstance(value, string_types):
                client.module.fail_json(
                    msg="Non-string value found for env option. Ambiguous env options must be "
                        "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
            env[name] = to_text(value, errors='surrogate_or_strict')
    if command is not None:
        # Split the command line into an argv list, shell-style
        argv = shlex.split(command)
    if detach and stdin is not None:
        client.module.fail_json(msg='If detach=true, stdin cannot be provided.')
    if stdin is not None and client.module.params['stdin_add_newline']:
        stdin += '\n'
    try:
        # Create the exec instance
        data = {
            'Container': container,
            'User': user or '',
            'Privileged': False,
            'Tty': False,  # the TTY setting is passed at exec start time below
            'AttachStdin': bool(stdin),
            'AttachStdout': True,
            'AttachStderr': True,
            'Cmd': argv,
            'Env': format_environment(env) if env is not None else None,
        }
        if chdir is not None:
            data['WorkingDir'] = chdir
        exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
        exec_id = exec_data['Id']
        data = {
            'Tty': tty,
            'Detach': detach,
        }
        if detach:
            # Start the command and return immediately; only the exec ID is reported
            client.post_json_to_text('/exec/{0}/start', exec_id, data=data)
            client.module.exit_json(changed=True, exec_id=exec_id)
        else:
            if stdin and not detach:
                # Attach to the exec socket so that stdin can be written to it
                exec_socket = client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data)
                try:
                    with DockerSocketHandlerModule(exec_socket, client.module, selectors) as exec_socket_handler:
                        if stdin:
                            exec_socket_handler.write(to_bytes(stdin))
                        stdout, stderr = exec_socket_handler.consume()
                finally:
                    exec_socket.close()
            else:
                stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, data=data, stream=False, tty=tty, demux=True)
            # Retrieve the exit code of the finished command
            result = client.get_json('/exec/{0}/json', exec_id)
            stdout = to_text(stdout or b'')
            stderr = to_text(stderr or b'')
            if strip_empty_ends:
                stdout = stdout.rstrip('\r\n')
                stderr = stderr.rstrip('\r\n')
            client.module.exit_json(
                changed=True,
                stdout=stdout,
                stderr=stderr,
                rc=result.get('ExitCode') or 0,
            )
    except NotFound:
        client.fail('Could not find container "{0}"'.format(container))
    except APIError as e:
        # HTTP 409 is returned when the container is paused
        if e.response is not None and e.response.status_code == 409:
            client.fail('The container "{0}" has been paused ({1})'.format(container, to_native(e)))
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,116 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_container_info
short_description: Retrieves facts about a docker container
description:
- Retrieves facts about a docker container.
- Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container) returns
for a non-absent container.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- The name of the container to inspect.
- When identifying an existing container, the name may be a name or a long or short container ID.
type: str
required: true
author:
- "Felix Fontein (@felixfontein)"
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get infos on container
community.docker.docker_container_info:
name: mydata
register: result
- name: Does container exist?
ansible.builtin.debug:
msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
- name: Print information about container
ansible.builtin.debug:
var: result.container
when: result.exists
"""
RETURN = r"""
exists:
description:
- Returns whether the container exists.
type: bool
returned: always
sample: true
container:
description:
- Facts representing the current state of the container. Matches the docker inspection output.
- Will be V(none) if container does not exist.
returned: always
type: dict
sample: '{ "AppArmorProfile": "", "Args": [], "Config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false,
"Cmd": [ "/usr/bin/supervisord" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
], "ExposedPorts": { "443/tcp": {}, "80/tcp": {} }, "Hostname": "8e47bf643eb9", "Image": "lnmp_nginx:v1", "Labels": {},
"OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": { "/tmp/lnmp/nginx-sites/logs/":
{} }, ... }'
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
def main():
    """Entry point: inspect a single Docker container and return the result as facts.

    Never changes state; always exits with ``changed=False``. On daemon or
    transport errors the module fails with a descriptive message.
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        # get_container() accepts a container name or a long/short container ID
        # and returns the inspection data, or a falsy value if nothing matched.
        container = client.get_container(client.module.params['name'])

        client.module.exit_json(
            changed=False,
            exists=bool(container),  # simplified from ``True if container else False``
            container=container,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,308 @@
#!/usr/bin/python
#
# Copyright 2025 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_context_info
short_description: Retrieve information on Docker contexts for the current user
version_added: 4.4.0
description:
- Return information on Docker contexts.
- This includes some generic information, as well as a RV(contexts[].config) dictionary that can be used for module defaults for all community.docker modules
that use the C(community.docker.docker) module defaults group.
extends_documentation_fragment:
- community.docker.attributes
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
only_current:
description:
- If set to V(true), RV(contexts) will just contain the current context and none else.
- If set to V(false) (default), RV(contexts) will list all contexts, unless O(name) is specified.
- Mutually exclusive to O(name).
type: bool
default: false
name:
description:
- A specific Docker CLI context to query.
- The module will fail if this context does not exist. If you simply want to query whether a context exists,
do not specify this parameter and use Jinja2 to search the resulting list for a context of the given name instead.
- Mutually exclusive with O(only_current).
type: str
cli_context:
description:
- Override for the default context's name.
- This is preferably used for context selection when O(only_current=true),
and it is used to compute the return values RV(contexts[].current) and RV(current_context_name).
type: str
author:
- "Felix Fontein (@felixfontein)"
"""
EXAMPLES = r"""
- name: Get infos on contexts
community.docker.docker_context_info:
register: result
- name: Show all contexts
ansible.builtin.debug:
msg: "{{ result.contexts }}"
- name: Get current context
community.docker.docker_context_info:
only_current: true
register: docker_current_context
- name: Run community.docker modules with current context
module_defaults:
group/community.docker.docker: "{{ docker_current_context.contexts[0].config }}"
block:
- name: Task using the current context
community.docker.docker_container:
image: ubuntu:latest
name: ubuntu
state: started
"""
RETURN = r"""
contexts:
description:
- A list of all contexts (O(only_current=false), O(name) not specified),
only the current context (O(only_current=true)),
or the requested context (O(name) specified).
type: list
elements: dict
returned: success
contains:
current:
description:
- Whether this context is the current one.
type: bool
returned: success
sample: true
name:
description:
- The context's name.
      type: str
returned: success
sample: default
description:
description:
- The context's description, if available.
      type: str
returned: success
sample: My context
meta_path:
description:
- The path to the context's meta directory.
- Not present for RV(contexts[].name=default).
type: str
returned: success
sample: /home/felix/.docker/contexts/meta/0123456789abcdef01234567890abcdef0123456789abcdef0123456789abcde
tls_path:
description:
- The path to the context's TLS config directory.
- Not present for RV(contexts[].name=default).
type: str
returned: success
sample: /home/user/.docker/contexts/tls/0123456789abcdef01234567890abcdef0123456789abcdef0123456789abcde/
config:
description:
- In case the context is for Docker, contains option values to configure the community.docker modules to use this context.
- Note that the exact values returned here and their values might change over time if incompatibilities to existing modules are found.
The goal is that this configuration works fine with all modules in this collection, but we do not have the capabilities to
test all possible configuration options at the moment.
type: dict
returned: success
sample: {}
contains:
docker_host:
description:
- The Docker daemon to connect to.
type: str
returned: success and context is for Docker
sample: unix:///var/run/docker.sock
tls:
description:
- Whether the Docker context should use an unvalidated TLS connection.
type: bool
returned: success and context is for Docker
sample: false
ca_path:
description:
- The CA certificate used to validate the Docker daemon's certificate.
          type: str
returned: success, context is for Docker, TLS config is present, and CA cert is present
sample: /path/to/ca-cert.pem
client_cert:
description:
- The client certificate to authenticate with to the Docker daemon.
          type: str
returned: success, context is for Docker, TLS config is present, and client cert info is present
sample: /path/to/client-cert.pem
client_key:
description:
- The client certificate's key to authenticate with to the Docker daemon.
          type: str
returned: success, context is for Docker, TLS config is present, and client cert info is present
sample: /path/to/client-key.pem
validate_certs:
description:
- Whether the Docker context should use a validated TLS connection.
type: bool
returned: success, context is for Docker, and TLS config is present
sample: true
current_context_name:
description:
- The name of the current Docker context.
type: str
returned: success
sample: default
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.six import string_types
from ansible_collections.community.docker.plugins.module_utils._api.context.api import (
ContextAPI,
)
from ansible_collections.community.docker.plugins.module_utils._api.context.config import (
get_current_context_name_with_source,
)
from ansible_collections.community.docker.plugins.module_utils._api.context.context import (
IN_MEMORY,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
ContextException,
DockerException,
)
def tls_context_to_json(context):
    """Serialize a TLS configuration object into a plain dict.

    Returns ``None`` when no TLS configuration is given; otherwise a dict
    with the client certificate/key pair, the CA certificate, and the
    verification flag.
    """
    if context is None:
        return None
    client_cert = None
    client_key = None
    if context.cert:
        # ``cert`` is a (certificate, key) pair
        client_cert = context.cert[0]
        client_key = context.cert[1]
    return {
        'client_cert': client_cert,
        'client_key': client_key,
        'ca_cert': context.ca_cert,
        'verify': context.verify,
        # 'ssl_version': context.ssl_version, -- this isn't used anymore
    }
def to_bool(value):
    """Coerce *value* to a real ``bool`` using Python truthiness.

    Replaces the redundant ``True if value else False`` expression with the
    built-in ``bool()`` constructor; behavior is identical.
    """
    return bool(value)
def context_to_json(context, current):
    """Convert a Docker CLI context object into the dict returned by this module.

    :param context: a context object as returned by ``ContextAPI``
    :param current: whether this context is the currently selected one
    :return: dict with generic context data and a ``config`` sub-dict holding
        option values suitable for the community.docker module defaults group
    """
    module_config = {}
    if 'docker' in context.endpoints:
        endpoint = context.endpoints['docker']
        raw_host = endpoint.get('Host')
        if isinstance(raw_host, string_types):
            host = to_text(endpoint['Host'])
            # Split off the scheme and normalize it so the resulting host
            # string also works with the Docker CLI tool.
            scheme = None
            sep_idx = host.find('://')
            if sep_idx >= 0:
                scheme = host[:sep_idx]
                host = host[sep_idx + 3:]
            if scheme in ('http', 'https'):
                scheme = 'tcp'
            elif scheme == 'http+unix':
                scheme = 'unix'
            if scheme:
                host = "{0}://{1}".format(scheme, host)
            # Assemble the option values for the modules
            module_config['docker_host'] = host
            tls_cfg = context.tls_cfg.get('docker')
            if tls_cfg:
                if tls_cfg.ca_cert:
                    module_config['ca_path'] = tls_cfg.ca_cert
                if tls_cfg.cert:
                    module_config['client_cert'] = tls_cfg.cert[0]
                    module_config['client_key'] = tls_cfg.cert[1]
                module_config['validate_certs'] = tls_cfg.verify
                module_config['tls'] = True
            else:
                module_config['tls'] = to_bool(endpoint.get('SkipTLSVerify'))
    return {
        'current': current,
        'name': context.name,
        'description': context.description,
        # IN_MEMORY marks contexts without an on-disk representation
        'meta_path': None if context.meta_path is IN_MEMORY else context.meta_path,
        'tls_path': None if context.tls_path is IN_MEMORY else context.tls_path,
        'config': module_config,
    }
def main():
    """Entry point: collect information on Docker CLI contexts and return it."""
    module = AnsibleModule(
        argument_spec=dict(
            only_current=dict(type='bool', default=False),
            name=dict(type='str'),
            cli_context=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ("only_current", "name"),
        ],
    )

    params = module.params
    try:
        # Figure out which context counts as "current", and remember where
        # that decision came from (for error messages).
        if params['cli_context']:
            current_name = params['cli_context']
            current_source = "cli_context module option"
        else:
            current_name, current_source = get_current_context_name_with_source()

        if params['name']:
            requested = ContextAPI.get_context(params['name'])
            if not requested:
                module.fail_json(msg="There is no context of name {name!r}".format(name=params['name']))
            contexts = [requested]
        elif params['only_current']:
            requested = ContextAPI.get_context(current_name)
            if not requested:
                module.fail_json(
                    msg="There is no context of name {name!r}, which is configured as the default context ({source})".format(
                        name=current_name,
                        source=current_source,
                    ),
                )
            contexts = [requested]
        else:
            contexts = ContextAPI.contexts()

        json_contexts = sorted(
            (context_to_json(ctx, ctx.name == current_name) for ctx in contexts),
            key=lambda entry: entry['name'],
        )

        module.exit_json(
            changed=False,
            contexts=json_contexts,
            current_context_name=current_name,
        )
    except ContextException as e:
        module.fail_json(msg='Error when handling Docker contexts: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except DockerException as e:
        module.fail_json(msg='An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,381 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_host_info
short_description: Retrieves facts about docker host and lists of objects of the services
description:
- Retrieves facts about a docker host.
- Essentially returns the output of C(docker system info).
- The module also allows to list object names for containers, images, networks and volumes. It also allows to query information
on disk usage.
- The output differs depending on API version of the docker daemon.
- If the docker daemon cannot be contacted or does not meet the API version requirements, the module will fail.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.idempotent_not_modify_state
attributes:
check_mode:
support: full
details:
- This action does not modify state.
diff_mode:
support: N/A
details:
- This action does not modify state.
options:
containers:
description:
- Whether to list containers.
type: bool
default: false
containers_all:
description:
- By default, only running containers are returned.
- This corresponds to the C(--all) option to C(docker container list).
type: bool
default: false
version_added: 3.4.0
containers_filters:
description:
- A dictionary of filter values used for selecting containers to list.
- 'For example, C(until: 24h).'
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) for
more information on possible filters.
type: dict
images:
description:
- Whether to list images.
type: bool
default: false
images_filters:
description:
- A dictionary of filter values used for selecting images to list.
- 'For example, C(dangling: true).'
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) for more
information on possible filters.
type: dict
networks:
description:
- Whether to list networks.
type: bool
default: false
networks_filters:
description:
- A dictionary of filter values used for selecting networks to list.
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) for
more information on possible filters.
type: dict
volumes:
description:
- Whether to list volumes.
type: bool
default: false
volumes_filters:
description:
- A dictionary of filter values used for selecting volumes to list.
- C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) for more
information on possible filters.
type: dict
disk_usage:
description:
- Summary information on used disk space by all Docker layers.
- The output is a sum of images, volumes, containers and build cache.
type: bool
default: false
verbose_output:
description:
- When set to V(true) and O(networks), O(volumes), O(images), O(containers), or O(disk_usage) is set to V(true) then
output will contain verbose information about objects matching the full output of API method. For details see the
documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
- The verbose output in this module contains only subset of information returned by this module for each type of the
objects.
type: bool
default: false
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get info on docker host
community.docker.docker_host_info:
register: result
- name: Get info on docker host and list images
community.docker.docker_host_info:
images: true
register: result
- name: Get info on docker host and list images matching the filter
community.docker.docker_host_info:
images: true
images_filters:
label: "mylabel"
register: result
- name: Get info on docker host and verbose list images
community.docker.docker_host_info:
images: true
verbose_output: true
register: result
- name: Get info on docker host and used disk space
community.docker.docker_host_info:
disk_usage: true
register: result
- name: Get info on docker host and list containers matching the filter
community.docker.docker_host_info:
containers: true
containers_filters:
label:
- key1=value1
- key2=value2
register: result
- name: Show host information
ansible.builtin.debug:
var: result.host_info
"""
RETURN = r"""
can_talk_to_docker:
description:
- Will be V(true) if the module can talk to the docker daemon.
returned: both on success and on error
type: bool
host_info:
description:
- Facts representing the basic state of the docker host. Matches the C(docker system info) output.
returned: always
type: dict
volumes:
description:
- List of dict objects containing the basic information about each volume. Keys matches the C(docker volume ls) output
unless O(verbose_output=true). See description for O(verbose_output).
returned: When O(volumes=true)
type: list
elements: dict
networks:
description:
- List of dict objects containing the basic information about each network. Keys matches the C(docker network ls) output
unless O(verbose_output=true). See description for O(verbose_output).
returned: When O(networks=true)
type: list
elements: dict
containers:
description:
- List of dict objects containing the basic information about each container. Keys matches the C(docker container ls)
output unless O(verbose_output=true). See description for O(verbose_output).
returned: When O(containers=true)
type: list
elements: dict
images:
description:
- List of dict objects containing the basic information about each image. Keys matches the C(docker image ls) output unless
O(verbose_output=true). See description for O(verbose_output).
returned: When O(images=true)
type: list
elements: dict
disk_usage:
description:
- Information on summary disk usage by images, containers and volumes on docker host unless O(verbose_output=true). See
description for O(verbose_output).
returned: When O(disk_usage=true)
type: dict
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
clean_dict_booleans_for_docker_api,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, APIError
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
class DockerHostManager(DockerBaseClass):
    """Gathers facts about the Docker host and optional lists of Docker objects.

    Instantiating this class performs all daemon queries immediately and
    stores the outcome in the ``results`` dict passed to the constructor;
    the instance itself is not used afterwards.
    """

    def __init__(self, client, results):
        """Query the Docker daemon and populate ``results`` in place.

        :param client: connected ``AnsibleDockerClient``
        :param results: dict that receives the collected facts
        """
        super(DockerHostManager, self).__init__()
        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']
        # Object kinds whose module option name doubles as the key under
        # which the listing is stored in the results dict.
        listed_objects = ['volumes', 'networks', 'containers', 'images']
        self.results['host_info'] = self.get_docker_host_info()
        # At this point we definitely know that we can talk to the Docker daemon
        self.results['can_talk_to_docker'] = True
        self.client.fail_results['can_talk_to_docker'] = True
        if self.client.module.params['disk_usage']:
            self.results['disk_usage'] = self.get_docker_disk_usage_facts()
        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                # e.g. 'containers' -> 'containers_filters'
                filter_name = docker_object + "_filters"
                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name), True)
                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)

    def get_docker_host_info(self):
        """Return the daemon's system info (``docker system info``); fail the module on API errors."""
        try:
            return self.client.info()
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_disk_usage_facts(self):
        """Return disk-usage data (``docker system df``).

        With ``verbose_output`` the full API response is returned; otherwise
        only the ``LayersSize`` summary value is kept.
        """
        try:
            if self.verbose_output:
                return self.client.df()
            else:
                return dict(LayersSize=self.client.df()['LayersSize'])
        except APIError as exc:
            self.client.fail("Error inspecting docker host: %s" % to_native(exc))

    def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
        """List Docker objects of one kind.

        :param docker_object: one of ``'containers'``, ``'networks'``,
            ``'images'``, ``'volumes'``
        :param filters: optional dict of API filters
        :param verbose: NOTE(review): unused; ``self.verbose_output`` is
            consulted instead — confirm whether this parameter can be dropped
        :return: the raw API items when ``self.verbose_output`` is set,
            otherwise a list of dicts trimmed to the per-kind header keys below
        """
        items = None
        items_list = []
        # Keys kept in the non-verbose output, mirroring the CLI list columns.
        header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
        header_volumes = ['Driver', 'Name']
        header_images = ['Id', 'RepoTags', 'Created', 'Size']
        header_networks = ['Id', 'Driver', 'Name', 'Scope']
        # NOTE(review): filter_arg is built but never used; filters are passed
        # through ``params`` below instead.
        filter_arg = dict()
        if filters:
            filter_arg['filters'] = filters
        try:
            if docker_object == 'containers':
                params = {
                    'limit': -1,
                    'all': 1 if self.client.module.params['containers_all'] else 0,
                    'size': 0,
                    'trunc_cmd': 0,
                    'filters': convert_filters(filters) if filters else None,
                }
                items = self.client.get_json("/containers/json", params=params)
            elif docker_object == 'networks':
                params = {
                    'filters': convert_filters(filters or {})
                }
                items = self.client.get_json("/networks", params=params)
            elif docker_object == 'images':
                params = {
                    'only_ids': 0,
                    'all': 0,
                    'filters': convert_filters(filters) if filters else None,
                }
                items = self.client.get_json("/images/json", params=params)
            elif docker_object == 'volumes':
                params = {
                    'filters': convert_filters(filters) if filters else None,
                }
                items = self.client.get_json('/volumes', params=params)
                # The volumes endpoint wraps the actual list in a 'Volumes' key.
                items = items['Volumes']
        except APIError as exc:
            self.client.fail("Error inspecting docker host for object '%s': %s" % (docker_object, to_native(exc)))
        if self.verbose_output:
            return items
        # Trim each item down to the header keys for the requested object kind.
        for item in items:
            item_record = dict()
            if docker_object == 'containers':
                for key in header_containers:
                    item_record[key] = item.get(key)
            elif docker_object == 'networks':
                for key in header_networks:
                    item_record[key] = item.get(key)
            elif docker_object == 'images':
                for key in header_images:
                    item_record[key] = item.get(key)
            elif docker_object == 'volumes':
                for key in header_volumes:
                    item_record[key] = item.get(key)
            items_list.append(item_record)
        return items_list
def main():
    """Entry point: gather Docker host facts and optional object listings."""
    argument_spec = {
        'containers': {'type': 'bool', 'default': False},
        'containers_all': {'type': 'bool', 'default': False},
        'containers_filters': {'type': 'dict'},
        'images': {'type': 'bool', 'default': False},
        'images_filters': {'type': 'dict'},
        'networks': {'type': 'bool', 'default': False},
        'networks_filters': {'type': 'dict'},
        'volumes': {'type': 'bool', 'default': False},
        'volumes_filters': {'type': 'dict'},
        'disk_usage': {'type': 'bool', 'default': False},
        'verbose_output': {'type': 'bool', 'default': False},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        fail_results={
            'can_talk_to_docker': False,
        },
    )

    api_version = client.module.params['api_version']
    if api_version is None or api_version.lower() == 'auto':
        # At this point we know that we can talk to Docker, since we asked it for the API version
        client.fail_results['can_talk_to_docker'] = True

    try:
        results = {'changed': False}
        # The manager populates ``results`` in place during construction.
        DockerHostManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,597 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_build
short_description: Build Docker images using Docker buildx
version_added: 3.6.0
description:
- This module allows you to build Docker images using Docker's buildx plugin (BuildKit).
- Note that the module is B(not idempotent) in the sense of classical Ansible modules. The only idempotence check is whether
the built image already exists. This check can be disabled with the O(rebuild) option.
extends_documentation_fragment:
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: partial
details:
- If O(rebuild=always) the module is not idempotent.
options:
name:
description:
- 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When pushing or
pulling an image the name can optionally include the tag by appending C(:tag_name).'
- Note that image IDs (hashes) and names with digest cannot be used.
type: str
required: true
tag:
description:
- Tag for the image name O(name) that is to be tagged.
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
type: str
default: latest
path:
description:
- The path for the build environment.
type: path
required: true
dockerfile:
description:
- Provide an alternate name for the Dockerfile to use when building an image.
- This can also include a relative path (relative to O(path)).
type: str
cache_from:
description:
- List of image names to consider as cache source.
type: list
elements: str
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
type: bool
default: false
network:
description:
- The network to use for C(RUN) build instructions.
type: str
nocache:
description:
- Do not use cache when building an image.
type: bool
default: false
etc_hosts:
description:
- Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
- Instead of an IP address, the special value V(host-gateway) can also be used, which resolves to the host's gateway
IP and allows building containers to connect to services running on the host.
type: dict
args:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
type: dict
target:
description:
- When building an image specifies an intermediate build stage by name as a final stage for the resulting image.
type: str
platform:
description:
- Platforms in the format C(os[/arch[/variant]]).
- Since community.docker 3.10.0 this can be a list of platforms, instead of just a single platform.
type: list
elements: str
shm_size:
description:
- Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer. Unit can be V(B) (byte), V(K) (kibibyte,
1024B), V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte).
- Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses V(64M).
type: str
labels:
description:
- Dictionary of key value pairs.
type: dict
rebuild:
description:
- Defines the behavior of the module if the image to build (as specified in O(name) and O(tag)) already exists.
type: str
choices:
- never
- always
default: never
secrets:
description:
- Secrets to expose to the build.
type: list
elements: dict
version_added: 3.10.0
suboptions:
id:
description:
- The secret identifier.
- The secret will be made available as a file in the container under C(/run/secrets/<id>).
type: str
required: true
type:
description:
- Type of the secret.
type: str
choices:
file:
- Reads the secret from a file on the target.
- The file must be specified in O(secrets[].src).
env:
- Reads the secret from an environment variable on the target.
- The environment variable must be named in O(secrets[].env).
- Note that this requires the Buildkit plugin to have version 0.6.0 or newer.
value:
- Provides the secret from a given value O(secrets[].value).
            - B(Note) that the secret will be passed as an environment variable to C(docker compose). Use another means of
transport if you consider this not safe enough.
- Note that this requires the Buildkit plugin to have version 0.6.0 or newer.
required: true
src:
description:
- Source path of the secret.
- Only supported and required for O(secrets[].type=file).
type: path
env:
description:
- Environment value of the secret.
- Only supported and required for O(secrets[].type=env).
type: str
value:
description:
- Value of the secret.
          - B(Note) that the secret will be passed as an environment variable to C(docker compose). Use another means of transport
if you consider this not safe enough.
- Only supported and required for O(secrets[].type=value).
type: str
outputs:
description:
- Output destinations.
- You can provide a list of exporters to export the built image in various places. Note that not all exporters might
be supported by the build driver used.
- Note that depending on how this option is used, no image with name O(name) and tag O(tag) might be created, which
can cause the basic idempotency this module offers to not work.
- Providing an empty list to this option is equivalent to not specifying it at all. The default behavior is a single
entry with O(outputs[].type=image).
- B(Note) that since community.docker 4.2.0, an entry for O(name)/O(tag) is added if O(outputs) has at least one entry
and no entry has type O(outputs[].type=image) and includes O(name)/O(tag) in O(outputs[].name). This is because the
module would otherwise pass C(--tag name:image) to the buildx plugin, which for some reason overwrites all images
in O(outputs) by the C(name:image) provided in O(name)/O(tag).
type: list
elements: dict
version_added: 3.10.0
suboptions:
type:
description:
- The type of exporter to use.
type: str
choices:
local:
- This export type writes all result files to a directory on the client. The new files will be owned by the current
user. On multi-platform builds, all results will be put in subdirectories by their platform.
- The destination has to be provided in O(outputs[].dest).
tar:
            - This export type writes all result files as a single tarball on the client. On multi-platform builds,
all results will be put in subdirectories by their platform.
- The destination has to be provided in O(outputs[].dest).
oci:
- This export type writes the result image or manifest list as an L(OCI image layout,
https://github.com/opencontainers/image-spec/blob/v1.0.1/image-layout.md)
tarball on the client.
- The destination has to be provided in O(outputs[].dest).
docker:
- This export type writes the single-platform result image as a Docker image specification tarball on the client.
Tarballs created by this exporter are also OCI compatible.
- The destination can be provided in O(outputs[].dest). If not specified, the tar will be loaded automatically
to the local image store.
- The Docker context where to import the result can be provided in O(outputs[].context).
image:
- This exporter writes the build result as an image or a manifest list. When using this driver, the image will
appear in C(docker images).
- The image name can be provided in O(outputs[].name). If it is not provided, O(name) and O(tag) will be used.
- Optionally, image can be automatically pushed to a registry by setting O(outputs[].push=true).
required: true
dest:
description:
- The destination path.
- Required for O(outputs[].type=local), O(outputs[].type=tar), O(outputs[].type=oci).
- Optional for O(outputs[].type=docker).
type: path
context:
description:
- Name for the Docker context where to import the result.
- Optional for O(outputs[].type=docker).
type: str
name:
description:
- Name(s) under which the image is stored under.
- If not provided, O(name) and O(tag) will be used.
- Optional for O(outputs[].type=image).
- This can be a list of strings since community.docker 4.2.0.
type: list
elements: str
push:
description:
- Whether to push the built image to a registry.
- Only used for O(outputs[].type=image).
type: bool
default: false
requirements:
- "Docker CLI with Docker buildx plugin"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image_push
- module: community.docker.docker_image_tag
"""
EXAMPLES = r"""
- name: Build Python 3.12 image
community.docker.docker_image_build:
name: localhost/python/3.12:latest
path: /home/user/images/python
dockerfile: Dockerfile-3.12
- name: Build multi-platform image
community.docker.docker_image_build:
name: multi-platform-image
tag: "1.5.2"
path: /home/user/images/multi-platform
platform:
- linux/amd64
- linux/arm64/v8
"""
RETURN = r"""
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
command:
description: The command executed.
returned: success and for some failures
type: list
elements: str
version_added: 4.2.0
"""
import base64
import os
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
clean_dict_booleans_for_docker_api,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
def convert_to_bytes(value, module, name, unlimited_value=None):
    """Convert a human-readable size string (for example ``'512MB'``) to a byte count.

    Returns ``None`` unchanged, and returns ``unlimited_value`` when the input is
    the string ``'unlimited'`` or the string form of ``unlimited_value`` itself.
    On a malformed size string, the module fails with an explanatory message.
    """
    if value is None:
        return None
    sentinels = ('unlimited', str(unlimited_value)) if unlimited_value is not None else ()
    try:
        if value in sentinels:
            return unlimited_value
        return human_to_bytes(value)
    except ValueError as exc:
        module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
def dict_to_list(dictionary, concat='='):
    """Flatten a dict into a deterministically ordered list of ``key<concat>value`` strings."""
    entries = []
    for key, value in sorted(dictionary.items()):
        entries.append('%s%s%s' % (key, concat, value))
    return entries
def _quote_csv(input):
if input.strip() == input and all(i not in input for i in '",\r\n'):
return input
return '"{0}"'.format(input.replace('"', '""'))
class ImageBuilder(DockerBaseClass):
    """Validate module parameters and drive ``docker buildx build`` through the Docker CLI.

    Construction validates the parameters (and fails the module early on errors);
    build_image() then performs the actual build unless the image already exists
    and O(rebuild=never).
    """
    def __init__(self, client):
        super(ImageBuilder, self).__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params
        self.cache_from = parameters['cache_from']
        self.pull = parameters['pull']
        self.network = parameters['network']
        self.nocache = parameters['nocache']
        self.etc_hosts = clean_dict_booleans_for_docker_api(parameters['etc_hosts'])
        self.args = clean_dict_booleans_for_docker_api(parameters['args'])
        self.target = parameters['target']
        self.platform = parameters['platform']
        self.shm_size = convert_to_bytes(parameters['shm_size'], self.client.module, 'shm_size')
        self.labels = clean_dict_booleans_for_docker_api(parameters['labels'])
        self.rebuild = parameters['rebuild']
        self.secrets = parameters['secrets']
        self.outputs = parameters['outputs']
        # The buildx CLI plugin is required for everything this module does; fail early if missing.
        buildx = self.client.get_client_plugin_info('buildx')
        if buildx is None:
            self.fail('Docker CLI {0} does not have the buildx plugin installed'.format(self.client.get_cli()))
        buildx_version = buildx['Version'].lstrip('v')
        if self.secrets:
            for secret in self.secrets:
                if secret['type'] in ('env', 'value'):
                    # Secrets passed via environment variables need --secret type=env (buildx >= 0.6.0).
                    if LooseVersion(buildx_version) < LooseVersion('0.6.0'):
                        self.fail('The Docker buildx plugin has version {version}, but 0.6.0 is needed for secrets of type=env and type=value'.format(
                            version=buildx_version,
                        ))
        if self.outputs and len(self.outputs) > 1:
            # Multiple --output flags are only supported since buildx 0.13.0.
            if LooseVersion(buildx_version) < LooseVersion('0.13.0'):
                self.fail('The Docker buildx plugin has version {version}, but 0.13.0 is needed to specify more than one output'.format(
                    version=buildx_version,
                ))
        self.path = parameters['path']
        if not os.path.isdir(self.path):
            self.fail('"{0}" is not an existing directory'.format(self.path))
        self.dockerfile = parameters['dockerfile']
        if self.dockerfile and not os.path.isfile(os.path.join(self.path, self.dockerfile)):
            self.fail('"{0}" is not an existing file'.format(os.path.join(self.path, self.dockerfile)))
        self.name = parameters['name']
        self.tag = parameters['tag']
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail('"{0}" is not a valid docker tag'.format(self.tag))
        if is_image_name_id(self.name):
            self.fail('Image name must not be a digest')
        # If name contains a tag, it takes precedence over tag parameter.
        repo, repo_tag = parse_repository_tag(self.name)
        if repo_tag:
            self.name = repo
            self.tag = repo_tag
        if is_image_name_id(self.tag):
            self.fail('Image name must not contain a digest, but have a tag')
        if self.outputs:
            # When outputs are given, --tag is not passed (see add_args()), so make sure
            # at least one image output carries the requested name:tag.
            found = False
            name_tag = '%s:%s' % (self.name, self.tag)
            for output in self.outputs:
                if output['type'] == 'image':
                    if not output['name']:
                        # Since we no longer pass --tag if --output is provided, we need to set this manually
                        output['name'] = [name_tag]
                    if output['name'] and name_tag in output['name']:
                        found = True
            if not found:
                self.outputs.append({
                    'type': 'image',
                    'name': [name_tag],
                    'push': False,
                })
                if LooseVersion(buildx_version) < LooseVersion('0.13.0'):
                    # The implicit extra output would exceed the single-output limit of old buildx.
                    self.fail(
                        "The output does not include an image with name {name_tag}, and the Docker"
                        " buildx plugin has version {version} which only supports one output.".format(
                            name_tag=name_tag,
                            version=buildx_version,
                        ),
                    )
    def fail(self, msg, **kwargs):
        # Delegate failure reporting to the client (which calls module.fail_json()).
        self.client.fail(msg, **kwargs)
    def add_list_arg(self, args, option, values):
        # Append one ``option value`` pair per value to the CLI argument list.
        for value in values:
            args.extend([option, value])
    def add_args(self, args):
        """Append all build options to ``args``.

        Returns a dict of environment variables that must be set for the CLI
        invocation (used to pass type=value secrets without exposing them on
        the command line).
        """
        environ_update = {}
        if not self.outputs:
            # Without explicit outputs, tag the build result directly.
            args.extend(['--tag', '%s:%s' % (self.name, self.tag)])
        if self.dockerfile:
            args.extend(['--file', os.path.join(self.path, self.dockerfile)])
        if self.cache_from:
            self.add_list_arg(args, '--cache-from', self.cache_from)
        if self.pull:
            args.append('--pull')
        if self.network:
            args.extend(['--network', self.network])
        if self.nocache:
            args.append('--no-cache')
        if self.etc_hosts:
            self.add_list_arg(args, '--add-host', dict_to_list(self.etc_hosts, ':'))
        if self.args:
            self.add_list_arg(args, '--build-arg', dict_to_list(self.args))
        if self.target:
            args.extend(['--target', self.target])
        if self.platform:
            for platform in self.platform:
                args.extend(['--platform', platform])
        if self.shm_size:
            args.extend(['--shm-size', str(self.shm_size)])
        if self.labels:
            self.add_list_arg(args, '--label', dict_to_list(self.labels))
        if self.secrets:
            random_prefix = None
            for index, secret in enumerate(self.secrets):
                if secret['type'] == 'file':
                    args.extend(['--secret', 'id={id},type=file,src={src}'.format(id=secret['id'], src=secret['src'])])
                if secret['type'] == 'env':
                    # BUGFIX: the environment variable name comes from the 'env' suboption;
                    # 'src' is mutually exclusive with 'env' and is always None here.
                    args.extend(['--secret', 'id={id},type=env,env={env}'.format(id=secret['id'], env=secret['env'])])
                if secret['type'] == 'value':
                    # We pass values on using environment variables. The user has been warned in the documentation
                    # that they should only use this mechanism when being comfortable with it.
                    if random_prefix is None:
                        # Use /dev/urandom to generate some entropy to make the environment variable's name unguessable
                        random_prefix = base64.b64encode(os.urandom(16)).decode('utf-8').replace('=', '')
                    env_name = 'ANSIBLE_DOCKER_COMPOSE_ENV_SECRET_{random}_{id}'.format(
                        random=random_prefix,
                        id=index,
                    )
                    environ_update[env_name] = secret['value']
                    args.extend(['--secret', 'id={id},type=env,env={env}'.format(id=secret['id'], env=env_name)])
        if self.outputs:
            for output in self.outputs:
                subargs = []
                if output['type'] == 'local':
                    subargs.extend(['type=local', 'dest={dest}'.format(dest=output['dest'])])
                if output['type'] == 'tar':
                    subargs.extend(['type=tar', 'dest={dest}'.format(dest=output['dest'])])
                if output['type'] == 'oci':
                    subargs.extend(['type=oci', 'dest={dest}'.format(dest=output['dest'])])
                if output['type'] == 'docker':
                    subargs.append('type=docker')
                    if output['dest'] is not None:
                        subargs.append('dest={dest}'.format(dest=output['dest']))
                    if output['context'] is not None:
                        subargs.append('context={context}'.format(context=output['context']))
                if output['type'] == 'image':
                    subargs.append('type=image')
                    if output['name'] is not None:
                        subargs.append('name={name}'.format(name=','.join(output['name'])))
                    if output['push']:
                        subargs.append('push=true')
                if subargs:
                    # Each value is CSV-quoted individually, then joined into one --output spec.
                    args.extend(['--output', ','.join(_quote_csv(subarg) for subarg in subargs)])
        return environ_update
    def build_image(self):
        """Build the image if needed and return the module result dict."""
        image = self.client.find_image(self.name, self.tag)
        results = dict(
            changed=False,
            actions=[],
            image=image or {},
        )
        if image:
            if self.rebuild == 'never':
                # Image already exists and rebuild=never: nothing to do.
                return results
        results['changed'] = True
        if not self.check_mode:
            args = ['buildx', 'build', '--progress', 'plain']
            environ_update = self.add_args(args)
            args.extend(['--', self.path])
            rc, stdout, stderr = self.client.call_cli(*args, environ_update=environ_update)
            if rc != 0:
                self.fail('Building %s:%s failed' % (self.name, self.tag), stdout=to_native(stdout), stderr=to_native(stderr), command=args)
            results['stdout'] = to_native(stdout)
            results['stderr'] = to_native(stderr)
            results['image'] = self.client.find_image(self.name, self.tag) or {}
            results['command'] = args
        return results
def main():
    """Module entry point: declare the argument spec, create the CLI client, run the build."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        tag=dict(type='str', default='latest'),
        path=dict(type='path', required=True),
        dockerfile=dict(type='str'),
        cache_from=dict(type='list', elements='str'),
        pull=dict(type='bool', default=False),
        network=dict(type='str'),
        nocache=dict(type='bool', default=False),
        etc_hosts=dict(type='dict'),
        args=dict(type='dict'),
        target=dict(type='str'),
        platform=dict(type='list', elements='str'),
        shm_size=dict(type='str'),
        labels=dict(type='dict'),
        rebuild=dict(type='str', choices=['never', 'always'], default='never'),
        secrets=dict(
            type='list',
            elements='dict',
            options=dict(
                id=dict(type='str', required=True),
                type=dict(type='str', choices=['file', 'env', 'value'], required=True),
                src=dict(type='path'),
                env=dict(type='str'),
                value=dict(type='str', no_log=True),
            ),
            # Exactly one of src/env/value, matching the secret's type.
            required_if=[
                ('type', 'file', ['src']),
                ('type', 'env', ['env']),
                ('type', 'value', ['value']),
            ],
            mutually_exclusive=[
                ('src', 'env', 'value'),
            ],
            # no_log is handled per-field above (only 'value' is sensitive).
            no_log=False,
        ),
        outputs=dict(
            type='list',
            elements='dict',
            options=dict(
                type=dict(type='str', choices=['local', 'tar', 'oci', 'docker', 'image'], required=True),
                dest=dict(type='path'),
                context=dict(type='str'),
                name=dict(type='list', elements='str'),
                push=dict(type='bool', default=False),
            ),
            # File-based output types need a destination path.
            required_if=[
                ('type', 'local', ['dest']),
                ('type', 'tar', ['dest']),
                ('type', 'oci', ['dest']),
            ],
            mutually_exclusive=[
                ('dest', 'name'),
                ('dest', 'push'),
                ('context', 'name'),
                ('context', 'push'),
            ],
        ),
    )
    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # This module talks to the Docker CLI, not the daemon API directly.
        needs_api_version=False,
    )
    try:
        results = ImageBuilder(client).build_image()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,283 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_export
short_description: Export (archive) Docker images
version_added: 3.7.0
description:
- Creates an archive (tarball) from one or more Docker images.
- This can be copied to another machine and loaded with M(community.docker.docker_image_load).
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: full
options:
names:
description:
- 'One or more image names. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When
pushing or pulling an image the name can optionally include the tag by appending C(:tag_name).'
- Note that image IDs (hashes) can also be used.
type: list
elements: str
required: true
aliases:
- name
tag:
description:
      - Tag to use for entries in O(names) that do not specify a tag.
      - If a name in O(names) has format C(name:tag), then that tag takes precedence over this option.
type: str
default: latest
path:
description:
- The C(.tar) file the image should be exported to.
type: path
force:
description:
- Export the image even if the C(.tar) file already exists and seems to contain the right image.
type: bool
default: false
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image
- module: community.docker.docker_image_info
- module: community.docker.docker_image_load
"""
EXAMPLES = r"""
- name: Export an image
community.docker.docker_image_export:
name: pacur/centos-7
path: /tmp/centos-7.tar
- name: Export multiple images
community.docker.docker_image_export:
names:
- hello-world:latest
- pacur/centos-7:latest
path: /tmp/various.tar
"""
RETURN = r"""
images:
description: Image inspection results for the affected images.
returned: success
type: list
elements: dict
sample: []
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.image_archive import (
load_archived_image_manifest,
api_image_id,
ImageArchiveInvalidException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.constants import (
DEFAULT_DATA_CHUNK_SIZE,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
class ImageExportManager(DockerBaseClass):
    """Export one or more Docker images into a local ``.tar`` archive, idempotently."""
    def __init__(self, client):
        super(ImageExportManager, self).__init__()
        self.client = client
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode
        self.path = parameters['path']
        self.force = parameters['force']
        self.tag = parameters['tag']
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail('"{0}" is not a valid docker tag'.format(self.tag))
        # If name contains a tag, it takes precedence over tag parameter.
        # Each entry of self.names is a dict: either {'id', 'joined'} for image IDs,
        # or {'name', 'tag', 'joined'} for named images ('joined' is the display form).
        self.names = []
        for name in parameters['names']:
            if is_image_name_id(name):
                self.names.append({'id': name, 'joined': name})
            else:
                repo, repo_tag = parse_repository_tag(name)
                if not repo_tag:
                    repo_tag = self.tag
                self.names.append({'name': repo, 'tag': repo_tag, 'joined': '%s:%s' % (repo, repo_tag)})
        if not self.names:
            self.fail('At least one image name must be specified')
    def fail(self, msg):
        # Delegate failure reporting to the client (calls module.fail_json()).
        self.client.fail(msg)
    def get_export_reason(self):
        """Return a human-readable reason why the archive must be (re)written, or None if it is up to date."""
        if self.force:
            return 'Exporting since force=true'
        try:
            archived_images = load_archived_image_manifest(self.path)
            if archived_images is None:
                return 'Overwriting since no image is present in archive'
        except ImageArchiveInvalidException as exc:
            self.log('Unable to extract manifest summary from archive: %s' % to_native(exc))
            return 'Overwriting an unreadable archive file'
        # Compare archive contents against the requested images; any mismatch in
        # either direction (extra or missing image) forces a re-export.
        # Note: relies on run() having filled in name['id'] before calling this.
        left_names = list(self.names)
        for archived_image in archived_images:
            found = False
            for i, name in enumerate(left_names):
                if name['id'] == api_image_id(archived_image.image_id) and [name['joined']] == archived_image.repo_tags:
                    del left_names[i]
                    found = True
                    break
            if not found:
                return 'Overwriting archive since it contains unexpected image %s named %s' % (
                    archived_image.image_id, ', '.join(archived_image.repo_tags)
                )
        if left_names:
            return 'Overwriting archive since it is missing image(s) %s' % (', '.join([name['joined'] for name in left_names]))
        return None
    def write_chunks(self, chunks):
        # Stream the daemon's response chunks straight into the target file.
        try:
            with open(self.path, 'wb') as fd:
                for chunk in chunks:
                    fd.write(chunk)
        except Exception as exc:
            self.fail("Error writing image archive %s - %s" % (self.path, to_native(exc)))
    def export_images(self):
        """Fetch the image archive from the Docker daemon and write it to self.path."""
        image_names = [name['joined'] for name in self.names]
        image_names_str = ', '.join(image_names)
        if len(image_names) == 1:
            # Single image: use the /images/{name}/get endpoint.
            self.log("Getting archive of image %s" % image_names[0])
            try:
                chunks = self.client._stream_raw_result(
                    self.client._get(self.client._url('/images/{0}/get', image_names[0]), stream=True),
                    DEFAULT_DATA_CHUNK_SIZE,
                    False,
                )
            except Exception as exc:
                self.fail("Error getting image %s - %s" % (image_names[0], to_native(exc)))
        else:
            # Multiple images: use /images/get with a 'names' query parameter.
            self.log("Getting archive of images %s" % image_names_str)
            try:
                chunks = self.client._stream_raw_result(
                    self.client._get(
                        self.client._url('/images/get'),
                        stream=True,
                        params={'names': image_names},
                    ),
                    DEFAULT_DATA_CHUNK_SIZE,
                    False,
                )
            except Exception as exc:
                self.fail("Error getting images %s - %s" % (image_names_str, to_native(exc)))
        self.write_chunks(chunks)
    def run(self):
        """Resolve the requested images, export them if needed, and return the result dict."""
        # NOTE(review): 'tag' is computed here but never used afterwards — tags were
        # already resolved per-name in __init__; looks like leftover code.
        tag = self.tag
        if not tag:
            tag = "latest"
        images = []
        for name in self.names:
            if 'id' in name:
                image = self.client.find_image_by_id(name['id'], accept_missing_image=True)
            else:
                image = self.client.find_image(name=name['name'], tag=name['tag'])
            if not image:
                self.fail("Image %s not found" % name['joined'])
            images.append(image)
            # Will have a 'sha256:' prefix
            name['id'] = image['Id']
        results = {
            'changed': False,
            'images': images,
        }
        reason = self.get_export_reason()
        if reason is not None:
            results['msg'] = reason
            results['changed'] = True
            if not self.check_mode:
                self.export_images()
        return results
def main():
    """Module entry point: declare the argument spec, create the API client, run the export."""
    argument_spec = dict(
        # NOTE(review): 'path' is not marked required here although the export writes to it —
        # presumably validated elsewhere or an upstream oversight; confirm before relying on it.
        path=dict(type='path'),
        force=dict(type='bool', default=False),
        names=dict(type='list', elements='str', required=True, aliases=['name']),
        tag=dict(type='str', default='latest'),
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    try:
        results = ImageExportManager(client).run()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,197 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_info
short_description: Inspect docker images
description:
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
- If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists locally,
you can call the module with the image name, then check whether the result list is empty (image does not exist) or has
one element (the image exists locally).
- The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with O(community.docker.docker_image#module:source=pull)
to ensure an image is pulled.
notes:
- This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]), where C(tag)
is optional. If a tag is not provided, V(latest) will be used. Instead of image names, also image IDs can be used.
- If no name is provided, a list of all images will be returned.
type: list
elements: str
requirements:
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
"""
EXAMPLES = r"""
- name: Inspect a single image
community.docker.docker_image_info:
name: pacur/centos-7
- name: Inspect multiple images
community.docker.docker_image_info:
name:
- pacur/centos-7
- sinatra
register: result
- name: Make sure that both images pacur/centos-7 and sinatra exist locally
ansible.builtin.assert:
that:
- result.images | length == 2
"""
RETURN = r"""
images:
description:
- Inspection results for the selected images.
- The list only contains inspection results of images existing locally.
returned: always
type: list
elements: dict
sample: [{"Architecture": "amd64", "Author": "", "Comment": "", "Config": {"AttachStderr": false, "AttachStdin": false,
"AttachStdout": false, "Cmd": ["/etc/docker/registry/config.yml"], "Domainname": "", "Entrypoint": ["/bin/registry"],
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"], "ExposedPorts": {"5000/tcp": {}},
"Hostname": "e5c68db50333", "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", "Labels": {},
"OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": {"/var/lib/registry": {}},
"WorkingDir": ""}, "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610", "ContainerConfig": {
"AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": ["/bin/sh", "-c", '#(nop) CMD ["/etc/docker/registry/config.yml"]'],
"Domainname": "", "Entrypoint": ["/bin/registry"], "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],
"ExposedPorts": {"5000/tcp": {}}, "Hostname": "e5c68db50333", "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {}, "OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": {"/var/lib/registry": {}},
"WorkingDir": ""}, "Created": "2016-03-08T21:08:15.399680378Z", "DockerVersion": "1.9.1", "GraphDriver": {"Data": null,
"Name": "aufs"}, "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08", "Name": "registry:2",
"Os": "linux", "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805", "RepoDigests": [], "RepoTags": [
"registry:2"], "Size": 0, "VirtualSize": 165808884}]
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
class ImageManager(DockerBaseClass):
    """Gather inspection facts for the requested images, or for all local images when no name is given."""
    def __init__(self, client, results):
        super(ImageManager, self).__init__()
        self.client = client
        self.results = results
        self.name = self.client.module.params.get('name')
        self.log("Gathering facts for images: %s" % (str(self.name)))
        # Fact gathering happens at construction time; results are written into
        # the shared 'results' dict passed in by main().
        if self.name:
            self.results['images'] = self.get_facts()
        else:
            self.results['images'] = self.get_all_images()
    def fail(self, msg):
        # Delegate failure reporting to the client (calls module.fail_json()).
        self.client.fail(msg)
    def get_facts(self):
        '''
        Lookup and inspect each image name found in the names parameter.
        Images that do not exist locally are silently omitted from the result.
        :returns array of image dictionaries
        '''
        results = []
        names = self.name
        if not isinstance(names, list):
            names = [names]
        for name in names:
            if is_image_name_id(name):
                self.log('Fetching image %s (ID)' % (name))
                image = self.client.find_image_by_id(name, accept_missing_image=True)
            else:
                repository, tag = parse_repository_tag(name)
                if not tag:
                    tag = 'latest'
                self.log('Fetching image %s:%s' % (repository, tag))
                image = self.client.find_image(name=repository, tag=tag)
            if image:
                results.append(image)
        return results
    def get_all_images(self):
        '''
        List all images on the Docker daemon and inspect each of them.
        :returns array of image dictionaries (None entries for images that vanished mid-listing)
        '''
        results = []
        params = {
            'only_ids': 0,
            'all': 0,
        }
        images = self.client.get_json("/images/json", params=params)
        for image in images:
            try:
                inspection = self.client.get_json('/images/{0}/json', image['Id'])
            except NotFound:
                # Image disappeared between listing and inspection.
                inspection = None
            except Exception as exc:
                self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc)))
            results.append(inspection)
        return results
def main():
    """Module entry point: declare the argument spec, create the API client, gather image facts."""
    argument_spec = dict(
        name=dict(type='list', elements='str'),
    )
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    try:
        results = dict(
            changed=False,
            images=[]
        )
        # ImageManager fills 'results' in place during construction.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,196 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_load
short_description: Load docker image(s) from archives
version_added: 1.3.0
description:
- Load one or multiple Docker images from a C(.tar) archive, and return information on the loaded image(s).
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: none
options:
path:
description:
- The path to the C(.tar) archive to load Docker image(s) from.
type: path
required: true
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image_export
- module: community.docker.docker_image_push
- module: community.docker.docker_image_remove
- module: community.docker.docker_image_tag
"""
EXAMPLES = r"""
- name: Load all image(s) from the given tar file
community.docker.docker_image_load:
path: /path/to/images.tar
register: result
- name: Print the loaded image names
ansible.builtin.debug:
msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
"""
RETURN = r"""
image_names:
description: List of image names and IDs loaded from the archive.
returned: success
type: list
elements: str
sample:
- 'hello-world:latest'
- 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
images:
description: Image inspection results for the loaded images.
returned: success
type: list
elements: dict
sample: []
"""
import errno
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
class ImageManager(DockerBaseClass):
    """Load images from a ``.tar`` archive into the Docker daemon and collect inspection results."""
    def __init__(self, client, results):
        super(ImageManager, self).__init__()
        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode
        self.path = parameters['path']
        # Loading happens at construction time; results are written into the
        # shared 'results' dict passed in by main().
        self.load_images()
    @staticmethod
    def _extract_output_line(line, output):
        '''
        Extract text line from stream output and, if found, adds it to output.
        '''
        if 'stream' in line or 'status' in line:
            # Make sure we have a string (assuming that line['stream'] and
            # line['status'] are either not defined, falsish, or a string)
            text_line = line.get('stream') or line.get('status') or ''
            output.extend(text_line.splitlines())
    def load_images(self):
        '''
        Load images from a .tar archive
        '''
        # Load image(s) from file
        load_output = []
        try:
            self.log("Opening image {0}".format(self.path))
            with open(self.path, 'rb') as image_tar:
                self.log("Loading images from {0}".format(self.path))
                # Stream the archive to /images/load and collect the daemon's progress lines.
                res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
                for line in self.client._stream_helper(res, decode=True):
                    self.log(line, pretty_print=True)
                    self._extract_output_line(line, load_output)
        except EnvironmentError as exc:
            if exc.errno == errno.ENOENT:
                # File not found: report a plain "opening" error without partial output.
                self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc)))
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
        except Exception as exc:
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
        # Collect loaded images
        loaded_images = []
        for line in load_output:
            # The daemon reports tagged images and untagged (ID-only) images differently.
            if line.startswith('Loaded image:'):
                loaded_images.append(line[len('Loaded image:'):].strip())
            if line.startswith('Loaded image ID:'):
                loaded_images.append(line[len('Loaded image ID:'):].strip())
        if not loaded_images:
            self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
        images = []
        for image_name in loaded_images:
            if is_image_name_id(image_name):
                images.append(self.client.find_image_by_id(image_name))
            elif ':' in image_name:
                image_name, tag = image_name.rsplit(':', 1)
                images.append(self.client.find_image(image_name, tag))
            else:
                self.client.module.warn('Image name "{0}" is neither ID nor has a tag'.format(image_name))
        self.results['image_names'] = loaded_images
        self.results['images'] = images
        # Loading always counts as a change (module documents idempotent: support none).
        self.results['changed'] = True
        self.results['stdout'] = '\n'.join(load_output)
def main():
    """Module entry point: declare the argument spec, create the API client, load the archive."""
    client = AnsibleDockerClient(
        argument_spec=dict(
            path=dict(type='path', required=True),
        ),
        # Loading cannot be simulated, so check mode is not supported.
        supports_check_mode=False,
    )
    try:
        results = dict(
            image_names=[],
            images=[],
        )
        # ImageManager fills 'results' in place during construction.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,223 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_pull
short_description: Pull Docker images from registries
version_added: 3.6.0
description:
- Pulls a Docker image from a registry.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: partial
details:
- When trying to pull an image with O(pull=always), the module assumes this is always changed in check mode.
- When check mode is combined with diff mode, the pulled image's ID is always shown as V(unknown) in the diff.
diff_mode:
support: full
idempotent:
support: full
options:
name:
description:
- Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name).
- The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest).
type: str
required: true
tag:
description:
- Used to select an image when pulling. Defaults to V(latest).
- If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored.
type: str
default: latest
platform:
description:
- Ask for this specific platform when pulling.
type: str
pull:
description:
- Determines when to pull an image.
- If V(always), will always pull the image.
- If V(not_present), will only pull the image if no image of the name exists on the current Docker daemon, or if O(platform)
does not match.
type: str
choices:
- always
- not_present
default: always
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
  - module: community.docker.docker_image_push
- module: community.docker.docker_image_remove
- module: community.docker.docker_image_tag
"""
EXAMPLES = r"""
- name: Pull an image
community.docker.docker_image_pull:
name: pacur/centos-7
# Select platform for pulling. If not specified, will pull whatever docker prefers.
platform: amd64
"""
RETURN = r"""
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._platform import (
normalize_platform_string,
compare_platform_strings,
compose_platform_string,
)
def image_info(image):
    """Summarize an image inspection result for one side of a diff entry.

    :param image: image inspection dict from the daemon, or a falsy value.
    :return: ``{'id': <image id>}`` when the image exists, else ``{'exists': False}``.
    """
    if not image:
        return {'exists': False}
    return {'id': image['Id']}
class ImagePuller(DockerBaseClass):
    """Implements the docker_image_pull module: pull an image by name, tag and platform."""

    def __init__(self, client):
        """Read and validate module parameters.

        :param client: AnsibleDockerClient connected to the Docker daemon.
        """
        super(ImagePuller, self).__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params
        self.name = parameters['name']
        self.tag = parameters['tag']
        self.platform = parameters['platform']
        self.pull_mode = parameters['pull']
        # Pulling needs a repository name; a bare image ID cannot be pulled.
        if is_image_name_id(self.name):
            self.client.fail("Cannot pull an image by ID")
        # allow_empty=True because the tag may instead be embedded in O(name).
        if not is_valid_tag(self.tag, allow_empty=True):
            self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag))
        # If name contains a tag, it takes precedence over tag parameter.
        repo, repo_tag = parse_repository_tag(self.name)
        if repo_tag:
            self.name = repo
            self.tag = repo_tag

    def pull(self):
        """Pull the image, honoring pull mode, requested platform and check mode.

        :return: result dict with ``changed``, ``actions``, ``image`` and ``diff``.
        """
        image = self.client.find_image(name=self.name, tag=self.tag)
        results = dict(
            changed=False,
            actions=[],
            image=image or {},
            diff=dict(before=image_info(image), after=image_info(image)),
        )
        if image and self.pull_mode == 'not_present':
            # Image already present: with no platform requested there is nothing to do.
            if self.platform is None:
                return results
            host_info = self.client.info()
            # Normalize both platform strings relative to the daemon's own
            # OS/architecture before comparing them.
            wanted_platform = normalize_platform_string(
                self.platform,
                daemon_os=host_info.get('OSType'),
                daemon_arch=host_info.get('Architecture'),
            )
            image_platform = compose_platform_string(
                os=image.get('Os'),
                arch=image.get('Architecture'),
                variant=image.get('Variant'),
                daemon_os=host_info.get('OSType'),
                daemon_arch=host_info.get('Architecture'),
            )
            # compare_platform_strings truthy -> platforms match, skip the pull.
            if compare_platform_strings(wanted_platform, image_platform):
                return results
        results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
        if self.check_mode:
            # In check mode the pulled image's ID cannot be known; per the module
            # documentation the result is always 'changed' with ID 'unknown'.
            results['changed'] = True
            results['diff']['after'] = image_info(dict(Id='unknown'))
        else:
            results['image'], not_changed = self.client.pull_image(self.name, tag=self.tag, platform=self.platform)
            results['changed'] = not not_changed
            results['diff']['after'] = image_info(results['image'])
        return results
def main():
    """Entry point of the docker_image_pull module: pull the image and exit."""
    spec = {
        'name': {'type': 'str', 'required': True},
        'tag': {'type': 'str', 'default': 'latest'},
        'platform': {'type': 'str'},
        'pull': {'type': 'str', 'choices': ['always', 'not_present'], 'default': 'always'},
    }
    client = AnsibleDockerClient(
        argument_spec=spec,
        supports_check_mode=True,
        # Platform selection needs a newer daemon API than the module baseline.
        option_minimal_versions={'platform': {'docker_api_version': '1.32'}},
    )
    try:
        client.module.exit_json(**ImagePuller(client).pull())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(exc)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,197 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_push
short_description: Push Docker images to registries
version_added: 3.6.0
description:
- Pushes a Docker image to a registry.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: full
options:
name:
description:
- Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name).
- The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest).
type: str
required: true
tag:
description:
- Select which image to push. Defaults to V(latest).
- If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored.
type: str
default: latest
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image_pull
- module: community.docker.docker_image_remove
- module: community.docker.docker_image_tag
"""
EXAMPLES = r"""
- name: Push an image
community.docker.docker_image_push:
name: registry.example.com:5000/repo/image
tag: latest
"""
RETURN = r"""
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.auth import (
get_config_header,
resolve_repository_name,
)
class ImagePusher(DockerBaseClass):
    """Implements the docker_image_push module: push a local image to its registry."""

    def __init__(self, client):
        """Read and validate module parameters.

        :param client: AnsibleDockerClient connected to the Docker daemon.
        """
        super(ImagePusher, self).__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params
        self.name = parameters['name']
        self.tag = parameters['tag']
        # A registry push needs a repository name; a bare image ID will not do.
        if is_image_name_id(self.name):
            self.client.fail("Cannot push an image by ID")
        # allow_empty=True here since the tag may still come embedded in O(name).
        if not is_valid_tag(self.tag, allow_empty=True):
            self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag))
        # If name contains a tag, it takes precedence over tag parameter.
        repo, repo_tag = parse_repository_tag(self.name)
        if repo_tag:
            self.name = repo
            self.tag = repo_tag
        # After splitting, a digest reference (name@hash:...) ends up in self.tag.
        if is_image_name_id(self.tag):
            self.client.fail("Cannot push an image by digest")
        # At this point the tag must be a real, non-empty tag.
        if not is_valid_tag(self.tag, allow_empty=False):
            self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag))

    def push(self):
        """Push the image to its registry.

        :return: result dict with ``changed``, ``actions`` and ``image`` keys.
        """
        image = self.client.find_image(name=self.name, tag=self.tag)
        if not image:
            self.client.fail('Cannot find image %s:%s' % (self.name, self.tag))
        results = dict(
            changed=False,
            actions=[],
            image=image,
        )
        push_registry, push_repo = resolve_repository_name(self.name)
        try:
            results['actions'].append('Pushed image %s:%s' % (self.name, self.tag))
            headers = {}
            # Attach registry credentials from the local Docker config, if any.
            header = get_config_header(self.client, push_registry)
            if header:
                headers['X-Registry-Auth'] = header
            response = self.client._post_json(
                self.client._url("/images/{0}/push", self.name),
                data=None,
                headers=headers,
                stream=True,
                params={'tag': self.tag},
            )
            self.client._raise_for_status(response)
            # The push endpoint streams JSON progress lines; scan them for error
            # details and for actual upload activity.
            for line in self.client._stream_helper(response, decode=True):
                self.log(line, pretty_print=True)
                if line.get('errorDetail'):
                    raise Exception(line['errorDetail']['message'])
                status = line.get('status')
                if status == 'Pushing':
                    # A 'Pushing' status line indicates upload activity, so report changed.
                    results['changed'] = True
        except Exception as exc:
            if 'unauthorized' in str(exc):
                if 'authentication required' in str(exc):
                    self.client.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
                                     (push_registry, push_repo, self.tag, to_native(exc), push_registry))
                else:
                    self.client.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
                                     (push_registry, push_repo, self.tag, str(exc)))
            # client.fail() exits, so the more specific messages above take priority
            # and this generic one is only reached for other errors.
            self.client.fail("Error pushing image %s:%s: %s" % (self.name, self.tag, to_native(exc)))
        return results
def main():
    """Entry point of the docker_image_push module: push the image and exit."""
    client = AnsibleDockerClient(
        argument_spec={
            'name': {'type': 'str', 'required': True},
            'tag': {'type': 'str', 'default': 'latest'},
        },
        supports_check_mode=False,
    )
    try:
        client.module.exit_json(**ImagePusher(client).push())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(exc)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,268 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_remove
short_description: Remove Docker images
version_added: 3.6.0
description:
- Remove Docker images from the Docker daemon.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: full
options:
name:
description:
- 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When pushing or
pulling an image the name can optionally include the tag by appending C(:tag_name).'
- Note that image IDs (hashes) can also be used.
type: str
required: true
tag:
description:
- Tag for the image name O(name) that is to be tagged.
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
type: str
default: latest
force:
description:
- Un-tag and remove all images matching the specified name.
type: bool
default: false
prune:
description:
- Delete untagged parent images.
type: bool
default: true
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image_load
- module: community.docker.docker_image_pull
- module: community.docker.docker_image_tag
"""
EXAMPLES = r"""
- name: Remove an image
community.docker.docker_image_remove:
name: pacur/centos-7
"""
RETURN = r"""
image:
description:
- Image inspection results for the affected image before removal.
- Empty if the image was not found.
returned: success
type: dict
sample: {}
deleted:
description:
- The digests of the images that were deleted.
returned: success
type: list
elements: str
sample: []
untagged:
description:
- The digests of the images that were untagged.
returned: success
type: list
elements: str
sample: []
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
class ImageRemover(DockerBaseClass):
    """Implements the docker_image_remove module: remove or untag an image."""

    def __init__(self, client):
        """Read and validate module parameters.

        :param client: AnsibleDockerClient connected to the Docker daemon.
        """
        super(ImageRemover, self).__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        self.diff = self.client.module._diff
        parameters = self.client.module.params
        self.name = parameters['name']
        self.tag = parameters['tag']
        self.force = parameters['force']
        self.prune = parameters['prune']
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail('"{0}" is not a valid docker tag'.format(self.tag))
        # If name contains a tag, it takes precedence over tag parameter.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag

    def fail(self, msg):
        # Thin wrapper so the rest of the class does not go through self.client.
        self.client.fail(msg)

    def get_diff_state(self, image):
        """Reduce an image inspection dict to the fields shown in diff output."""
        if not image:
            return dict(exists=False)
        return dict(
            exists=True,
            id=image['Id'],
            tags=sorted(image.get('RepoTags') or []),
            digests=sorted(image.get('RepoDigests') or []),
        )

    def absent(self):
        """Ensure the image (or one of its tag/digest references) is absent.

        :return: result dict with ``changed``, ``actions``, ``image``,
            ``deleted``, ``untagged`` and, in diff mode, ``diff``.
        """
        results = dict(
            changed=False,
            actions=[],
            image={},
            deleted=[],
            untagged=[],
        )
        name = self.name
        if is_image_name_id(name):
            image = self.client.find_image_by_id(name, accept_missing_image=True)
        else:
            image = self.client.find_image(name, self.tag)
            if self.tag:
                name = "%s:%s" % (self.name, self.tag)
        if self.diff:
            results['diff'] = dict(before=self.get_diff_state(image))
        if not image:
            # Nothing to remove; report an unchanged, empty result.
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
            return results
        results['changed'] = True
        results['actions'].append("Removed image %s" % (name))
        results['image'] = image
        if not self.check_mode:
            try:
                res = self.client.delete_json('/images/{0}', name, params={'force': self.force, 'noprune': not self.prune})
            except NotFound:
                # If the image vanished while we were trying to remove it, do not fail
                res = []
            except Exception as exc:
                self.fail("Error removing image %s - %s" % (name, to_native(exc)))
            # The daemon reports every untag/delete it performed; collect them.
            for entry in res:
                if entry.get('Untagged'):
                    results['untagged'].append(entry['Untagged'])
                if entry.get('Deleted'):
                    results['deleted'].append(entry['Deleted'])
            results['untagged'] = sorted(results['untagged'])
            results['deleted'] = sorted(results['deleted'])
            if self.diff:
                image_after = self.client.find_image_by_id(image['Id'], accept_missing_image=True)
                results['diff']['after'] = self.get_diff_state(image_after)
        elif is_image_name_id(name):
            # Check mode, removal by ID: every name the image is known by goes away.
            results['deleted'].append(image['Id'])
            results['untagged'] = sorted((image.get('RepoTags') or []) + (image.get('RepoDigests') or []))
            if not self.force and results['untagged']:
                self.fail('Cannot delete image by ID that is still in use - use force=true')
            if self.diff:
                results['diff']['after'] = self.get_diff_state({})
        elif is_image_name_id(self.tag):
            # Check mode, removal of a digest reference (name@digest): predict that the
            # image itself would only be deleted if this was its last remaining reference.
            results['untagged'].append(name)
            if len(image.get('RepoTags') or []) < 1 and len(image.get('RepoDigests') or []) < 2:
                results['deleted'].append(image['Id'])
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
                try:
                    results['diff']['after']['digests'].remove(name)
                except ValueError:
                    pass
        else:
            # Check mode, removal of a regular name:tag reference: same prediction,
            # with the roles of tags and digests swapped.
            results['untagged'].append(name)
            if len(image.get('RepoTags') or []) < 2 and len(image.get('RepoDigests') or []) < 1:
                results['deleted'].append(image['Id'])
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
                try:
                    results['diff']['after']['tags'].remove(name)
                except ValueError:
                    pass
        return results
def main():
    """Entry point of the docker_image_remove module: remove the image and exit."""
    client = AnsibleDockerClient(
        argument_spec={
            'name': {'type': 'str', 'required': True},
            'tag': {'type': 'str', 'default': 'latest'},
            'force': {'type': 'bool', 'default': False},
            'prune': {'type': 'bool', 'default': True},
        },
        supports_check_mode=True,
    )
    try:
        client.module.exit_json(**ImageRemover(client).absent())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(exc)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,275 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_image_tag
short_description: Tag Docker images with new names and/or tags
version_added: 3.6.0
description:
- This module allows to tag Docker images with new names and/or tags.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: full
options:
name:
description:
- 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When pushing or
pulling an image the name can optionally include the tag by appending C(:tag_name).'
- Note that image IDs (hashes) can also be used.
type: str
required: true
tag:
description:
- Tag for the image name O(name) that is to be tagged.
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
type: str
default: latest
repository:
description:
- List of new image names to tag the image as.
- Expects format C(repository:tag). If no tag is provided, will use the value of the O(tag) parameter if present, or
V(latest).
type: list
elements: str
required: true
existing_images:
description:
- Defines the behavior if the image to be tagged already exists and is another image than the one identified by O(name)
and O(tag).
- If set to V(keep), the tagged image is kept.
- If set to V(overwrite), the tagged image is overwritten by the specified one.
type: str
choices:
- keep
- overwrite
default: overwrite
requirements:
- "Docker API >= 1.25"
author:
- Felix Fontein (@felixfontein)
seealso:
- module: community.docker.docker_image_push
- module: community.docker.docker_image_remove
"""
EXAMPLES = r"""
- name: Tag Python 3.12 image with two new names
community.docker.docker_image_tag:
name: python:3.12
repository:
- python-3:3.12
- local-registry:5000/python-3/3.12:latest
"""
RETURN = r"""
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
tagged_images:
description:
- A list of images that got tagged.
returned: success
type: list
elements: str
sample:
- python-3:3.12
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
is_image_name_id,
is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
parse_repository_tag,
)
def convert_to_bytes(value, module, name, unlimited_value=None):
    """Convert a human-readable size string (e.g. "1G") to a byte count.

    ``None`` passes through unchanged. When ``unlimited_value`` is given, a
    value equal to "unlimited" or to ``str(unlimited_value)`` is mapped to
    that sentinel. Conversion failures abort the module run.
    """
    if value is None:
        return None
    # The sentinel comparison cannot raise, so it lives outside the try block.
    if unlimited_value is not None and value in ('unlimited', str(unlimited_value)):
        return unlimited_value
    try:
        return human_to_bytes(value)
    except ValueError as exc:
        module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
def image_info(name, tag, image):
    """Describe one tagging target for diff output.

    :return: dict with ``name`` and ``tag``, plus either the image's ``id``
        or ``exists: False`` when no image is present.
    """
    info = {'name': name, 'tag': tag}
    if not image:
        info['exists'] = False
    else:
        info['id'] = image['Id']
    return info
class ImageTagger(DockerBaseClass):
    """Implements the docker_image_tag module: tag one image under extra names."""

    def __init__(self, client):
        """Validate parameters and normalize the list of (repository, tag) targets.

        :param client: AnsibleDockerClient connected to the Docker daemon.
        """
        super(ImageTagger, self).__init__()
        self.client = client
        params = self.client.module.params
        self.check_mode = self.client.check_mode
        self.name = params['name']
        self.tag = params['tag']
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail('"{0}" is not a valid docker tag'.format(self.tag))
        # A tag embedded in O(name) takes precedence over the O(tag) parameter.
        if not is_image_name_id(self.name):
            repo_name, embedded_tag = parse_repository_tag(self.name)
            if embedded_tag:
                self.name = repo_name
                self.tag = embedded_tag
        self.keep_existing_images = params['existing_images'] == 'keep'
        # Validate each target and normalize it to a (repository, tag) pair,
        # defaulting the tag from the module's O(tag) parameter.
        self.repositories = []
        for index, target in enumerate(params['repository'], 1):
            if is_image_name_id(target):
                self.fail("repository[%d] must not be an image ID; got: %s" % (index, target))
            target_repo, target_tag = parse_repository_tag(target)
            if not target_tag:
                target_tag = params['tag']
            elif not is_valid_tag(target_tag, allow_empty=False):
                self.fail("repository[%d] must not have a digest; got: %s" % (index, target))
            self.repositories.append((target_repo, target_tag))

    def fail(self, msg):
        """Abort the module run with ``msg``."""
        self.client.fail(msg)

    def tag_image(self, image, name, tag):
        """Tag ``image`` as ``name:tag`` unless it already carries that name.

        :return: tuple ``(changed, message, previously tagged image or None)``.
        """
        existing = self.client.find_image(name=name, tag=tag)
        if existing:
            # Idempotency: nothing to do if the name already points at our image.
            if existing['Id'] == image['Id']:
                return (
                    False,
                    "target image already exists (%s) and is as expected" % existing['Id'],
                    existing,
                )
            if self.keep_existing_images:
                return (
                    False,
                    "target image already exists (%s) and is not as expected, but kept" % existing['Id'],
                    existing,
                )
            message = "target image existed (%s) and was not as expected" % existing['Id']
        else:
            message = "target image did not exist"
        if not self.check_mode:
            try:
                response = self.client._post(
                    self.client._url('/images/{0}/tag', image['Id']),
                    params={'tag': tag, 'repo': name, 'force': True},
                )
                self.client._raise_for_status(response)
                if response.status_code != 201:
                    raise Exception("Tag operation failed.")
            except Exception as exc:
                self.fail("Error: failed to tag image as %s:%s - %s" % (name, tag, to_native(exc)))
        return True, message, existing

    def tag_images(self):
        """Apply every configured tag to the source image.

        :return: result dict with ``changed``, ``actions``, ``image``,
            ``tagged_images`` and ``diff``.
        """
        if is_image_name_id(self.name):
            image = self.client.find_image_by_id(self.name, accept_missing_image=False)
        else:
            image = self.client.find_image(name=self.name, tag=self.tag)
        if not image:
            self.fail("Cannot find image %s:%s" % (self.name, self.tag))
        before = []
        after = []
        tagged = []
        results = dict(
            changed=False,
            actions=[],
            image=image,
            tagged_images=tagged,
            diff=dict(before=dict(images=before), after=dict(images=after)),
        )
        for repo, tag in self.repositories:
            changed, message, previous = self.tag_image(image, repo, tag)
            before.append(image_info(repo, tag, previous))
            after.append(image_info(repo, tag, image if changed else previous))
            if changed:
                results['changed'] = True
                results['actions'].append('Tagged image %s as %s:%s: %s' % (image['Id'], repo, tag, message))
                tagged.append('%s:%s' % (repo, tag))
            else:
                results['actions'].append('Not tagged image %s as %s:%s: %s' % (image['Id'], repo, tag, message))
        return results
def main():
    """Entry point of the docker_image_tag module: apply the tags and exit."""
    client = AnsibleDockerClient(
        argument_spec={
            'name': {'type': 'str', 'required': True},
            'tag': {'type': 'str', 'default': 'latest'},
            'repository': {'type': 'list', 'elements': 'str', 'required': True},
            'existing_images': {'type': 'str', 'choices': ['keep', 'overwrite'], 'default': 'overwrite'},
        },
        supports_check_mode=True,
    )
    try:
        client.module.exit_json(**ImageTagger(client).tag_images())
    except DockerException as exc:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(exc)), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(exc)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,446 @@
#!/usr/bin/python
#
# Copyright (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
# Chris Houseknecht, <house@redhat.com>
# James Tanner, <jtanner@redhat.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_login
short_description: Log into a Docker registry
description:
- Provides functionality similar to the C(docker login) command.
- Authenticate with a docker registry and add the credentials to your local Docker config file respectively the credentials
store associated to the registry. Adding the credentials to the config files resp. the credential store allows future
connections to the registry using tools such as Ansible's Docker modules, the Docker CLI and Docker SDK for Python without
needing to provide credentials.
- Running in check mode will perform the authentication without updating the config file.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: full
options:
registry_url:
description:
- The registry URL.
type: str
default: "https://index.docker.io/v1/"
aliases:
- registry
- url
username:
description:
- The username for the registry account.
- Required when O(state=present).
type: str
password:
description:
- The plaintext password for the registry account.
- Required when O(state=present).
type: str
reauthorize:
description:
- Refresh existing authentication found in the configuration file.
type: bool
default: false
aliases:
- reauth
config_path:
description:
- Custom path to the Docker CLI configuration file.
type: path
default: ~/.docker/config.json
aliases:
- dockercfg_path
state:
description:
- This controls the current state of the user. V(present) will log a user in, V(absent) will log them out.
- To logout you only need the registry server, which defaults to DockerHub.
- Before 2.1 you could ONLY log in.
- Docker does not support 'logout' with a custom config file.
type: str
default: 'present'
choices: ['present', 'absent']
requirements:
- "Docker API >= 1.25"
author:
- Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
- Chris Houseknecht (@chouseknecht)
"""
EXAMPLES = r"""
- name: Log into DockerHub
community.docker.docker_login:
username: docker
password: rekcod
- name: Log into private registry and force re-authorization
community.docker.docker_login:
registry_url: your.private.registry.io
username: yourself
password: secrets3
reauthorize: true
- name: Log into DockerHub using a custom config file
community.docker.docker_login:
username: docker
password: rekcod
config_path: /tmp/.mydockercfg
- name: Log out of DockerHub
community.docker.docker_login:
state: absent
"""
RETURN = r"""
login_results:
description: Results from the login.
returned: when O(state=present)
type: dict
sample: {"serveraddress": "localhost:5000", "username": "testuser"}
"""
import base64
import json
import os
import traceback
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DEFAULT_DOCKER_REGISTRY,
DockerBaseClass,
)
from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.auth import decode_auth
from ansible_collections.community.docker.plugins.module_utils._api.credentials.errors import CredentialsNotFound
from ansible_collections.community.docker.plugins.module_utils._api.credentials.store import Store
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
class DockerFileStore(object):
    '''
    Minimal credential store that persists credentials directly in the Docker
    CLI configuration file; used when no credential helper is configured.
    '''

    program = "<legacy config>"

    def __init__(self, config_path):
        '''Load the config at ``config_path``, falling back to an empty skeleton.'''
        self._config_path = config_path
        # Start from a minimal structure so the 'auths' key always exists.
        self._config = {'auths': {}}
        try:
            with open(self._config_path, "r") as fh:
                loaded = json.load(fh)
        except (ValueError, IOError):
            # Missing or unparsable config: keep just the skeleton.
            loaded = {}
        self._config.update(loaded)

    @property
    def config_path(self):
        '''Path of the configuration file this store reads and writes.'''
        return self._config_path

    def get(self, server):
        '''
        Return credentials for ``server`` from the config file.

        Raises ``CredentialsNotFound`` when no entry exists.
        '''
        entry = self._config['auths'].get(server)
        if not entry:
            raise CredentialsNotFound('No matching credentials')
        username, password = decode_auth(entry['auth'])
        return {'Username': username, 'Secret': password}

    def _write(self):
        '''Serialize the current configuration back to disk with mode 0600.'''
        config_dir = os.path.dirname(self._config_path)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        serialized = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
        # os.open lets us create the file with restrictive permissions directly.
        fd = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            os.write(fd, serialized)
        finally:
            os.close(fd)

    def store(self, server, username, password):
        '''Record base64-encoded ``username:password`` credentials for ``server``.'''
        encoded = base64.b64encode(
            to_bytes(username) + b':' + to_bytes(password)
        )
        self._config.setdefault('auths', {})[server] = {'auth': to_text(encoded)}
        self._write()

    def erase(self, server):
        '''Drop the credentials stored for ``server``, if any, and persist.'''
        auths = self._config.get('auths') or {}
        if server in auths:
            auths.pop(server)
            self._write()
class LoginManager(DockerBaseClass):
def __init__(self, client, results):
super(LoginManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.registry_url = parameters.get('registry_url')
self.username = parameters.get('username')
self.password = parameters.get('password')
self.reauthorize = parameters.get('reauthorize')
self.config_path = parameters.get('config_path')
self.state = parameters.get('state')
def run(self):
'''
Do the actual work of this task here. This allows instantiation for partial
testing.
'''
if self.state == 'present':
self.login()
else:
self.logout()
def fail(self, msg):
self.client.fail(msg)
def _login(self, reauth):
if self.config_path and os.path.exists(self.config_path):
self.client._auth_configs = auth.load_config(
self.config_path, credstore_env=self.client.credstore_env
)
elif not self.client._auth_configs or self.client._auth_configs.is_empty:
self.client._auth_configs = auth.load_config(
credstore_env=self.client.credstore_env
)
authcfg = self.client._auth_configs.resolve_authconfig(self.registry_url)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username') == self.username and not reauth:
return authcfg
req_data = {
'username': self.username,
'password': self.password,
'email': None,
'serveraddress': self.registry_url,
}
response = self.client._post_json(self.client._url('/auth'), data=req_data)
if response.status_code == 200:
self.client._auth_configs.add_auth(self.registry_url or auth.INDEX_NAME, req_data)
return self.client._result(response, json=True)
def login(self):
'''
Log into the registry with provided username/password. On success update the config
file with the new authorization.
:return: None
'''
self.results['actions'].append("Logged into %s" % (self.registry_url))
self.log("Log into %s with username %s" % (self.registry_url, self.username))
try:
response = self._login(self.reauthorize)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
# If user is already logged in, then response contains password for user
if 'password' in response:
# This returns correct password if user is logged in and wrong password is given.
# So if it returns another password as we passed, and the user did not request to
# reauthorize, still do it.
if not self.reauthorize and response['password'] != self.password:
try:
response = self._login(True)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
response.pop('password', None)
self.results['login_result'] = response
self.update_credentials()
def logout(self):
'''
Log out of the registry. On success update the config file.
:return: None
'''
# Get the configuration store.
store = self.get_credential_store_instance(self.registry_url, self.config_path)
try:
store.get(self.registry_url)
except CredentialsNotFound:
# get raises an exception on not found.
self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
self.results['changed'] = False
return
if not self.check_mode:
store.erase(self.registry_url)
self.results['changed'] = True
def update_credentials(self):
'''
If the authorization is not stored attempt to store authorization values via
the appropriate credential helper or to the config file.
:return: None
'''
# Check to see if credentials already exist.
store = self.get_credential_store_instance(self.registry_url, self.config_path)
try:
current = store.get(self.registry_url)
except CredentialsNotFound:
# get raises an exception on not found.
current = dict(
Username='',
Secret=''
)
if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
if not self.check_mode:
store.store(self.registry_url, self.username, self.password)
self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
store.program, self.registry_url))
self.results['changed'] = True
def get_credential_store_instance(self, registry, dockercfg_path):
'''
Return an instance of docker.credentials.Store used by the given registry.
:return: A Store or None
:rtype: Union[docker.credentials.Store, NoneType]
'''
credstore_env = self.client.credstore_env
config = auth.load_config(config_path=dockercfg_path)
store_name = auth.get_credential_store(config, registry)
# Make sure that there is a credential helper before trying to instantiate a
# Store object.
if store_name:
self.log("Found credential store %s" % store_name)
return Store(store_name, environment=credstore_env)
return DockerFileStore(dockercfg_path)
def main():
    """Entry point of the docker_login module."""
    argument_spec = dict(
        registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
        reauthorize=dict(type='bool', default=False, aliases=['reauth']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            # Logging in needs credentials; logging out does not.
            ('state', 'present', ['username', 'password']),
        ],
    )

    try:
        results = dict(
            changed=False,
            actions=[],
            login_result={}
        )
        LoginManager(client, results).run()
        # 'actions' is only used internally while running; drop it from the output.
        results.pop('actions', None)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,758 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_network
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the C(docker network) CLI subcommand.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: partial
details:
- If O(force=true) the module is not idempotent.
options:
name:
description:
- Name of the network to operate on.
type: str
required: true
aliases:
- network_name
config_from:
description:
- Specifies the config only network to use the config from.
type: str
version_added: 3.10.0
config_only:
description:
- Sets that this is a config only network.
type: bool
version_added: 3.10.0
connected:
description:
- List of container names or container IDs to connect to a network.
- Please note that the module only makes sure that these containers are connected to the network, but does not care
about connection options. If you rely on specific IP addresses and so on, use the M(community.docker.docker_container)
module to ensure your containers are correctly connected to this network.
type: list
elements: str
default: []
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
type: str
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
type: dict
default: {}
force:
description:
- With state V(present) will disconnect all containers for existing networks, delete the network and re-create the network.
- This option is required if you have changed the IPAM or driver options and want an existing network to be updated
to use the new options.
type: bool
default: false
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
- Use O(appends) to leave existing containers connected.
type: bool
default: false
aliases:
- incremental
enable_ipv4:
description:
- Enable IPv4 networking.
- This is enabled by default, but can be explicitly disabled.
- Requires Docker API 1.47 or newer.
type: bool
version_added: 4.5.0
enable_ipv6:
description:
- Enable IPv6 networking.
type: bool
ingress:
description:
- Enable Swarm routing-mesh.
type: bool
version_added: 4.2.0
ipam_driver:
description:
- Specify an IPAM driver.
type: str
ipam_driver_options:
description:
- Dictionary of IPAM driver options.
type: dict
ipam_config:
description:
- List of IPAM config blocks. Consult L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam)
for valid options and values. Note that O(ipam_config[].iprange) is spelled differently here (we use the notation
from the Docker SDK for Python).
type: list
elements: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
state:
description:
- V(absent) deletes the network. If a network has connected containers, these will be detached from the network.
- V(present) creates the network, if it does not already exist with the specified parameters, and connects the list
of containers provided by the O(connected) parameter. Containers not on the list will be disconnected. An empty list
will leave no containers connected to the network. Use the O(appends) option to leave existing containers connected.
Use the O(force) options to force re-creation of the network.
type: str
default: present
choices:
- absent
- present
internal:
description:
- Restrict external access to the network.
type: bool
labels:
description:
- Dictionary of labels.
type: dict
default: {}
scope:
description:
- Specify the network's scope.
type: str
choices:
- local
- global
- swarm
attachable:
description:
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect
to the network.
type: bool
notes:
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates
the network. It does not try to reconnect containers, except the ones listed in O(connected), and even for these, it
does not consider specific connection options like fixed IP addresses or MAC addresses. If you need more control over
how the containers are connected to the network, loop the M(community.docker.docker_container) module to loop over your
containers to make sure they are connected properly.
- The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services
are connected to the network, deleting the network will fail. When network options are changed, the network has to be
deleted and recreated, so this will fail as well.
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
- "Dave Bendit (@DBendit)"
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Create a network
community.docker.docker_network:
name: network_one
- name: Remove all but selected list of containers
community.docker.docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
community.docker.docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
community.docker.docker_network:
name: network_one
connected:
- container_a
appends: true
- name: Create a network with driver options
community.docker.docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
- name: Create a network with custom IPAM config
community.docker.docker_network:
name: network_three
ipam_config:
- subnet: 172.23.27.0/24
gateway: 172.23.27.2
iprange: 172.23.27.0/26
aux_addresses:
host1: 172.23.27.3
host2: 172.23.27.4
- name: Create a network with labels
community.docker.docker_network:
name: network_four
labels:
key1: value1
key2: value2
- name: Create a network with IPv6 IPAM config
community.docker.docker_network:
name: network_ipv6_one
enable_ipv6: true
ipam_config:
- subnet: fdd1:ac8c:0557:7ce1::/64
- name: Create a network with IPv6 and custom IPv4 IPAM config
community.docker.docker_network:
name: network_ipv6_two
enable_ipv6: true
ipam_config:
- subnet: 172.24.27.0/24
- subnet: fdd1:ac8c:0557:7ce2::/64
- name: Delete a network, disconnecting all containers
community.docker.docker_network:
name: network_one
state: absent
"""
RETURN = r"""
network:
description:
- Network inspection results for the affected network.
returned: success
type: dict
sample: {}
"""
import re
import traceback
import time
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
sanitize_labels,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
class TaskParameters(DockerBaseClass):
    """Exposes the docker_network module parameters as attributes."""

    # All supported option names; each becomes an attribute defaulting to None.
    _OPTION_NAMES = (
        'name', 'connected', 'config_from', 'config_only', 'driver',
        'driver_options', 'ipam_driver', 'ipam_driver_options', 'ipam_config',
        'appends', 'force', 'internal', 'labels', 'debug', 'enable_ipv4',
        'enable_ipv6', 'scope', 'attachable', 'ingress',
    )

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        for option in self._OPTION_NAMES:
            setattr(self, option, None)
        for key, value in client.module.params.items():
            setattr(self, key, value)

        # config_only sets driver to 'null' (and scope to 'local') so force that here. Otherwise we get
        # diffs of 'null' --> 'bridge' given that the driver option defaults to 'bridge'.
        if self.config_only:
            self.driver = 'null'
def container_names_in_network(network):
    """Return the names of all containers connected to a network.

    :param network: network inspection result (``docker network inspect``)
    :type network: dict
    :return: list of container names; empty when no containers are connected
        or when the ``Containers`` entry is missing or empty (inspect output
        does not always include it, e.g. for config-only networks)
    :rtype: list
    """
    # Use .get() so a missing 'Containers' key does not raise KeyError.
    containers = network.get('Containers')
    return [c['Name'] for c in containers.values()] if containers else []
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
# IPv6 prefix length must be 0-128. The previous alternation ('1[0-2][0-9]')
# also matched invalid prefixes such as /129.
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/(12[0-8]|1[01][0-9]|[1-9]?[0-9])$')


def validate_cidr(cidr):
    """Validate CIDR. Return IP version of a CIDR string on success.

    :param cidr: Valid CIDR
    :type cidr: str
    :return: ``ipv4`` or ``ipv6``
    :rtype: str
    :raises ValueError: If ``cidr`` is not a valid CIDR
    """
    if CIDR_IPV4.match(cidr):
        return 'ipv4'
    if CIDR_IPV6.match(cidr):
        return 'ipv6'
    raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
def normalize_ipam_config_key(key):
    """Map an IPAM config key from Docker API naming to the module's naming.

    Most keys are simply lowercased; ``AuxiliaryAddresses`` is special-cased
    to ``aux_addresses``.

    :param key: Docker API key
    :type key: str
    :return: Ansible module key
    :rtype: str
    """
    if key == 'AuxiliaryAddresses':
        return 'aux_addresses'
    return key.lower()
def dicts_are_essentially_equal(a, b):
    """Return whether ``a`` is a subset of ``b``, ignoring None entries of ``a``."""
    return all(
        b.get(key) == value
        for key, value in a.items()
        if value is not None
    )
class DockerNetworkManager(object):
    """State machine for the docker_network module.

    Inspects the existing network (if any), computes differences against the
    requested configuration, and creates/removes the network and its container
    connections as needed. All outcomes are accumulated in ``self.results``.
    Runs entirely from ``__init__``.
    """

    def __init__(self, client):
        # client: AnsibleDockerClient providing API access and module params.
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.existing_network = self.get_existing_network()

        # If no containers were requested, keep the ones currently connected
        # so a bare 'state: present' does not disconnect anything.
        if not self.parameters.connected and self.existing_network:
            self.parameters.connected = container_names_in_network(self.existing_network)

        # Fail early on malformed CIDR subnets before touching the daemon.
        if self.parameters.ipam_config:
            try:
                for ipam_config in self.parameters.ipam_config:
                    validate_cidr(ipam_config['subnet'])
            except ValueError as e:
                self.client.fail(to_native(e))

        # Driver options must be strings for the Docker API; convert booleans.
        if self.parameters.driver_options:
            self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)

        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff_result

    def get_existing_network(self):
        # Inspection result of the named network, or None if it does not exist.
        return self.client.get_network(name=self.parameters.name)

    def has_different_config(self, net):
        '''
        Evaluates an existing network and returns a tuple containing a boolean
        indicating if the configuration is different and the tracked differences.

        :param net: the inspection output for an existing network
        :return: (bool, DifferenceTracker)
        '''
        differences = DifferenceTracker()
        if self.parameters.config_only is not None and self.parameters.config_only != net.get('ConfigOnly', False):
            differences.add('config_only',
                            parameter=self.parameters.config_only,
                            active=net.get('ConfigOnly', False))
        if self.parameters.config_from is not None and self.parameters.config_from != net.get('ConfigFrom', {}).get('Network', ''):
            differences.add('config_from',
                            parameter=self.parameters.config_from,
                            active=net.get('ConfigFrom', {}).get('Network', ''))
        if self.parameters.driver and self.parameters.driver != net['Driver']:
            differences.add('driver',
                            parameter=self.parameters.driver,
                            active=net['Driver'])
        if self.parameters.driver_options:
            if not net.get('Options'):
                differences.add('driver_options',
                                parameter=self.parameters.driver_options,
                                active=net.get('Options'))
            else:
                # Compare per key: only options the user specified are checked,
                # extra options on the network are ignored.
                for key, value in self.parameters.driver_options.items():
                    if not (key in net['Options']) or value != net['Options'][key]:
                        differences.add('driver_options.%s' % key,
                                        parameter=value,
                                        active=net['Options'].get(key))
        if self.parameters.ipam_driver:
            if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
                differences.add('ipam_driver',
                                parameter=self.parameters.ipam_driver,
                                active=net.get('IPAM'))
        if self.parameters.ipam_driver_options is not None:
            ipam_driver_options = net['IPAM'].get('Options') or {}
            if ipam_driver_options != self.parameters.ipam_driver_options:
                differences.add('ipam_driver_options',
                                parameter=self.parameters.ipam_driver_options,
                                active=ipam_driver_options)
        if self.parameters.ipam_config is not None and self.parameters.ipam_config:
            if not net.get('IPAM') or not net['IPAM']['Config']:
                differences.add('ipam_config',
                                parameter=self.parameters.ipam_config,
                                active=net.get('IPAM', {}).get('Config'))
            else:
                # Put network's IPAM config into the same format as module's IPAM config
                net_ipam_configs = []
                for net_ipam_config in net['IPAM']['Config']:
                    config = dict()
                    for k, v in net_ipam_config.items():
                        config[normalize_ipam_config_key(k)] = v
                    net_ipam_configs.append(config)
                # Compare lists of dicts as sets of dicts
                for idx, ipam_config in enumerate(self.parameters.ipam_config):
                    net_config = dict()
                    # Find the first existing pool that matches the requested
                    # one on all keys the user actually specified.
                    for net_ipam_config in net_ipam_configs:
                        if dicts_are_essentially_equal(ipam_config, net_ipam_config):
                            net_config = net_ipam_config
                            break
                    for key, value in ipam_config.items():
                        if value is None:
                            # due to recursive argument_spec, all keys are always present
                            # (but have default value None if not specified)
                            continue
                        if value != net_config.get(key):
                            differences.add('ipam_config[%s].%s' % (idx, key),
                                            parameter=value,
                                            active=net_config.get(key))
        if self.parameters.enable_ipv4 is not None and self.parameters.enable_ipv4 != net.get('EnableIPv4', False):
            differences.add('enable_ipv4',
                            parameter=self.parameters.enable_ipv4,
                            active=net.get('EnableIPv4', False))
        if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
            differences.add('enable_ipv6',
                            parameter=self.parameters.enable_ipv6,
                            active=net.get('EnableIPv6', False))
        if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
            differences.add('internal',
                            parameter=self.parameters.internal,
                            active=net.get('Internal'))
        if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
            differences.add('scope',
                            parameter=self.parameters.scope,
                            active=net.get('Scope'))
        if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
            differences.add('attachable',
                            parameter=self.parameters.attachable,
                            active=net.get('Attachable'))
        if self.parameters.ingress is not None and self.parameters.ingress != net.get('Ingress', False):
            differences.add('ingress',
                            parameter=self.parameters.ingress,
                            active=net.get('Ingress'))
        if self.parameters.labels:
            if not net.get('Labels'):
                differences.add('labels',
                                parameter=self.parameters.labels,
                                active=net.get('Labels'))
            else:
                # Per-key comparison; labels present on the network but not
                # requested are ignored.
                for key, value in self.parameters.labels.items():
                    if not (key in net['Labels']) or value != net['Labels'][key]:
                        differences.add('labels.%s' % key,
                                        parameter=value,
                                        active=net['Labels'].get(key))

        return not differences.empty, differences

    def create_network(self):
        """Create the network via POST /networks/create if it does not exist."""
        if not self.existing_network:
            data = {
                'Name': self.parameters.name,
                'Driver': self.parameters.driver,
                'Options': self.parameters.driver_options,
                'IPAM': None,
                'CheckDuplicate': None,
            }

            # Only include optional fields the user actually set, so the
            # daemon's defaults apply otherwise.
            if self.parameters.config_only is not None:
                data['ConfigOnly'] = self.parameters.config_only
            if self.parameters.config_from:
                data['ConfigFrom'] = {'Network': self.parameters.config_from}
            if self.parameters.enable_ipv6 is not None:
                data['EnableIPv6'] = self.parameters.enable_ipv6
            if self.parameters.enable_ipv4 is not None:
                data['EnableIPv4'] = self.parameters.enable_ipv4
            if self.parameters.internal:
                data['Internal'] = True
            if self.parameters.scope is not None:
                data['Scope'] = self.parameters.scope
            if self.parameters.attachable is not None:
                data['Attachable'] = self.parameters.attachable
            if self.parameters.ingress is not None:
                data['Ingress'] = self.parameters.ingress
            if self.parameters.labels is not None:
                data["Labels"] = self.parameters.labels

            ipam_pools = []
            if self.parameters.ipam_config:
                for ipam_pool in self.parameters.ipam_config:
                    ipam_pools.append({
                        'Subnet': ipam_pool['subnet'],
                        'IPRange': ipam_pool['iprange'],
                        'Gateway': ipam_pool['gateway'],
                        'AuxiliaryAddresses': ipam_pool['aux_addresses'],
                    })

            if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
                # Only add IPAM if a driver was specified or if IPAM parameters were
                # specified. Leaving this parameter out can significantly speed up
                # creation; on my machine creation with this option needs ~15 seconds,
                # and without just a few seconds.
                data['IPAM'] = {
                    'Driver': self.parameters.ipam_driver,
                    'Config': ipam_pools or [],
                    'Options': self.parameters.ipam_driver_options,
                }

            if not self.check_mode:
                resp = self.client.post_json_to_json('/networks/create', data=data)
                self.client.report_warnings(resp, ['Warning'])
                self.existing_network = self.client.get_network(network_id=resp['Id'])
            self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
            self.results['changed'] = True

    def remove_network(self):
        """Disconnect all containers and delete the network if it exists."""
        if self.existing_network:
            self.disconnect_all_containers()
            if not self.check_mode:
                self.client.delete_call('/networks/{0}', self.parameters.name)
                if self.existing_network.get('Scope', 'local') == 'swarm':
                    # Removal of swarm-scoped networks does not complete
                    # immediately; poll until the network is actually gone.
                    while self.get_existing_network():
                        time.sleep(0.1)
            self.results['actions'].append("Removed network %s" % (self.parameters.name,))
            self.results['changed'] = True

    def is_container_connected(self, container_name):
        # True if the named container is attached to the (existing) network.
        if not self.existing_network:
            return False
        return container_name in container_names_in_network(self.existing_network)

    def is_container_exist(self, container_name):
        """Return whether a container with this name/ID exists; fails the module on API errors."""
        try:
            container = self.client.get_container(container_name)
            return bool(container)
        except DockerException as e:
            self.client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
        except RequestException as e:
            self.client.fail(
                'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
                exception=traceback.format_exc())

    def connect_containers(self):
        """Connect every requested container that exists and is not yet connected."""
        for name in self.parameters.connected:
            if not self.is_container_connected(name) and self.is_container_exist(name):
                if not self.check_mode:
                    data = {
                        "Container": name,
                        # No endpoint options: the module does not manage
                        # per-container connection settings.
                        "EndpointConfig": None,
                    }
                    self.client.post_json('/networks/{0}/connect', self.parameters.name, data=data)
                self.results['actions'].append("Connected container %s" % (name,))
                self.results['changed'] = True
                self.diff_tracker.add('connected.{0}'.format(name), parameter=True, active=False)

    def disconnect_missing(self):
        """Disconnect containers that are attached but not in the requested list."""
        if not self.existing_network:
            return
        containers = self.existing_network['Containers']
        if not containers:
            return
        for c in containers.values():
            name = c['Name']
            if name not in self.parameters.connected:
                self.disconnect_container(name)

    def disconnect_all_containers(self):
        """Disconnect every container currently attached to the network."""
        containers = self.client.get_network(name=self.parameters.name)['Containers']
        if not containers:
            return
        for cont in containers.values():
            self.disconnect_container(cont['Name'])

    def disconnect_container(self, container_name):
        """Force-disconnect a single container and record the change."""
        if not self.check_mode:
            data = {"Container": container_name, "Force": True}
            self.client.post_json('/networks/{0}/disconnect', self.parameters.name, data=data)
        self.results['actions'].append("Disconnected container %s" % (container_name,))
        self.results['changed'] = True
        self.diff_tracker.add('connected.{0}'.format(container_name),
                              parameter=False,
                              active=True)

    def present(self):
        """Ensure the network exists with the requested config and connections.

        If the config differs or force is set, the network is removed and
        re-created; then the requested containers are connected and - unless
        appends is set - all others are disconnected.
        """
        different = False
        differences = DifferenceTracker()
        if self.existing_network:
            different, differences = self.has_different_config(self.existing_network)

        self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
        if self.parameters.force or different:
            self.remove_network()
            self.existing_network = None

        self.create_network()
        self.connect_containers()
        if not self.parameters.appends:
            self.disconnect_missing()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result['differences'] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        if not self.check_mode and not self.parameters.debug:
            # 'actions' is an internal log; only expose it in check/debug mode.
            self.results.pop('actions')

        network_facts = self.get_existing_network()
        self.results['network'] = network_facts

    def absent(self):
        """Ensure the network does not exist."""
        self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
        self.remove_network()
def main():
    """Entry point of the docker_network module.

    Builds the argument spec, creates the Docker API client, runs the
    network manager and exits the module with its results.
    """
    argument_spec = dict(
        name=dict(type='str', required=True, aliases=['network_name']),
        config_from=dict(type='str'),
        config_only=dict(type='bool'),
        connected=dict(type='list', default=[], elements='str', aliases=['containers']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='bridge'),
        driver_options=dict(type='dict', default={}),
        force=dict(type='bool', default=False),
        appends=dict(type='bool', default=False, aliases=['incremental']),
        ipam_driver=dict(type='str'),
        ipam_driver_options=dict(type='dict'),
        ipam_config=dict(type='list', elements='dict', options=dict(
            subnet=dict(type='str'),
            iprange=dict(type='str'),
            gateway=dict(type='str'),
            aux_addresses=dict(type='dict'),
        )),
        enable_ipv4=dict(type='bool'),
        enable_ipv6=dict(type='bool'),
        internal=dict(type='bool'),
        labels=dict(type='dict', default={}),
        debug=dict(type='bool', default=False),
        scope=dict(type='str', choices=['local', 'global', 'swarm']),
        attachable=dict(type='bool'),
        ingress=dict(type='bool'),
    )

    # Minimum Docker API versions for options that newer daemons introduced;
    # the client rejects these options when talking to an older daemon.
    option_minimal_versions = dict(
        config_from=dict(docker_api_version='1.30'),
        config_only=dict(docker_api_version='1.30'),
        scope=dict(docker_api_version='1.30'),
        attachable=dict(docker_api_version='1.26'),
        enable_ipv4=dict(docker_api_version='1.47'),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # "The docker server >= 1.10.0"
        option_minimal_versions=option_minimal_versions,
    )

    # Validate label keys/values before doing any API work.
    sanitize_labels(client.module.params['labels'], 'labels', client)

    try:
        cm = DockerNetworkManager(client)

        client.module.exit_json(**cm.results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,115 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_network_info
short_description: Retrieves facts about docker network
description:
- Retrieves facts about a docker network.
- Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
returns for a non-absent network.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- The name of the network to inspect.
- When identifying an existing network, the value may be a name or a long or short network ID.
type: str
required: true
author:
- "Dave Bendit (@DBendit)"
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get infos on network
community.docker.docker_network_info:
name: mydata
register: result
- name: Does network exist?
ansible.builtin.debug:
msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
- name: Print information about network
ansible.builtin.debug:
var: result.network
when: result.exists
"""
RETURN = r"""
exists:
description:
- Returns whether the network exists.
type: bool
returned: always
sample: true
network:
description:
- Facts representing the current state of the network. Matches the docker inspection output.
- Will be V(none) if network does not exist.
returned: always
type: dict
sample: {"Attachable": false, "ConfigFrom": {"Network": ""}, "ConfigOnly": false, "Containers": {}, "Created": "2018-12-07T01:47:51.250835114-06:00",
"Driver": "bridge", "EnableIPv6": false, "IPAM": {"Config": [{"Gateway": "192.168.96.1", "Subnet": "192.168.96.0/20"}],
"Driver": "default", "Options": null}, "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a", "Ingress": false,
"Internal": false, "Labels": {}, "Name": "ansible-test-f2700bba", "Options": {}, "Scope": "local"}
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
def main():
    """Entry point of the docker_network_info module.

    Inspects the requested network and returns whether it exists together
    with its inspection data.
    """
    client = AnsibleDockerClient(
        argument_spec=dict(
            name=dict(type='str', required=True),
        ),
        supports_check_mode=True,
    )

    try:
        network = client.get_network(client.module.params['name'])

        client.module.exit_json(
            changed=False,
            exists=bool(network),
            network=network,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,305 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_node
short_description: Manage Docker Swarm node
description:
- Manages the Docker nodes through a Swarm Manager.
- This module allows changing the node's role and availability, and modifying, adding or removing node labels.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: full
options:
hostname:
description:
- The hostname or ID of node as registered in Swarm.
- If more than one node is registered using the same hostname the ID must be used, otherwise module will fail.
type: str
required: true
labels:
description:
- User-defined key/value metadata that will be assigned as node attribute.
- Label operations in this module apply to the docker swarm node specified by O(hostname). Use M(community.docker.docker_swarm)
module to add/modify/remove swarm cluster labels.
- The actual state of labels assigned to the node when module completes its work depends on O(labels_state) and O(labels_to_remove)
parameters values. See description below.
type: dict
labels_state:
description:
- It defines the operation on the labels assigned to node and labels specified in O(labels) option.
- Set to V(merge) to combine labels provided in O(labels) with those already assigned to the node. If no labels are
assigned then it will add listed labels. For labels that are already assigned to the node, it will update their values.
The labels not specified in O(labels) will remain unchanged. If O(labels) is empty then no changes will be made.
- Set to V(replace) to replace all assigned labels with provided ones. If O(labels) is empty then all labels assigned
to the node will be removed.
type: str
default: 'merge'
choices:
- merge
- replace
labels_to_remove:
description:
- List of labels that will be removed from the node configuration. The list has to contain only label names, not their
values.
- If the label provided on the list is not assigned to the node, the entry is ignored.
- If the label is both on the O(labels_to_remove) and O(labels), then value provided in O(labels) remains assigned to
the node.
- If O(labels_state=replace) and O(labels) is not provided or empty then all labels assigned to node are removed and
O(labels_to_remove) is ignored.
type: list
elements: str
availability:
description: Node availability to assign. If not provided then node availability remains unchanged.
choices:
- active
- pause
- drain
type: str
role:
description: Node role to assign. If not provided then node role remains unchanged.
choices:
- manager
- worker
type: str
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
- Docker API >= 1.25
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
- Thierry Bouvet (@tbouvet)
"""
EXAMPLES = r"""
- name: Set node role
community.docker.docker_node:
hostname: mynode
role: manager
- name: Set node availability
community.docker.docker_node:
hostname: mynode
availability: drain
- name: Replace node labels with new labels
community.docker.docker_node:
hostname: mynode
labels:
key: value
labels_state: replace
- name: Merge node labels and new labels
community.docker.docker_node:
hostname: mynode
labels:
key: value
- name: Remove all labels assigned to node
community.docker.docker_node:
hostname: mynode
labels_state: replace
- name: Remove selected labels from the node
community.docker.docker_node:
hostname: mynode
labels_to_remove:
- key1
- key2
"""
RETURN = r"""
node:
description: Information about node after 'update' operation.
returned: success
type: dict
"""
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils.common import (
DockerBaseClass,
RequestException,
)
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
from ansible_collections.community.docker.plugins.module_utils.util import sanitize_labels
class TaskParameters(DockerBaseClass):
    """Typed container for the docker_node module parameters.

    All expected attributes are pre-declared as None and then overlaid with
    the values actually supplied to the module, so attribute access never
    raises even for omitted options.
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()

        # Pre-declare the spec- and node-related attributes.
        for attribute in ('name', 'labels', 'labels_state', 'labels_to_remove',
                          'availability', 'role'):
            setattr(self, attribute, None)

        # Overlay the actual module parameters.
        for key, value in client.module.params.items():
            setattr(self, key, value)

        # Validate/normalize the user-provided labels dict.
        sanitize_labels(self.labels, "labels", client)
class SwarmNodeManager(DockerBaseClass):
    """Applies the requested role/availability/label changes to one swarm node.

    Construct-and-run: the whole update is driven from ``__init__``, and the
    outcome is written into the ``results`` dict supplied by ``main()``.
    """

    def __init__(self, client, results):
        super(SwarmNodeManager, self).__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        # Node updates are only possible from a swarm manager.
        self.client.fail_task_if_not_swarm_manager()

        self.parameters = TaskParameters(client)

        self.node_update()

    def node_update(self):
        """Build the new node spec and push it to the swarm when anything changed."""
        if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
            self.client.fail("This node is not part of a swarm.")
            return

        if self.client.check_if_swarm_node_is_down():
            self.client.fail("Can not update the node. The node is down.")

        try:
            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
        except APIError as exc:
            self.client.fail("Failed to get node information for %s" % to_native(exc))

        changed = False
        # Seed the spec with the requested values; fields the user did not
        # provide are filled back in from the node's current spec below.
        node_spec = dict(
            Availability=self.parameters.availability,
            Role=self.parameters.role,
            Labels=self.parameters.labels,
        )

        if self.parameters.role is None:
            node_spec['Role'] = node_info['Spec']['Role']
        else:
            if not node_info['Spec']['Role'] == self.parameters.role:
                node_spec['Role'] = self.parameters.role
                changed = True

        if self.parameters.availability is None:
            node_spec['Availability'] = node_info['Spec']['Availability']
        else:
            if not node_info['Spec']['Availability'] == self.parameters.availability:
                # Fix: write the new value into node_spec (the spec that gets
                # sent to the swarm), mirroring the Role handling above. The
                # previous code mutated the inspected node_info instead and
                # only worked because node_spec was pre-seeded with the
                # requested availability.
                node_spec['Availability'] = self.parameters.availability
                changed = True

        if self.parameters.labels_state == 'replace':
            if self.parameters.labels is None:
                # Replace with "nothing": clear all labels.
                node_spec['Labels'] = {}
                if node_info['Spec']['Labels']:
                    changed = True
            else:
                if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
                    node_spec['Labels'] = self.parameters.labels
                    changed = True
        elif self.parameters.labels_state == 'merge':
            node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
            if self.parameters.labels is not None:
                for key, value in self.parameters.labels.items():
                    if node_spec['Labels'].get(key) != value:
                        node_spec['Labels'][key] = value
                        changed = True

            if self.parameters.labels_to_remove is not None:
                for key in self.parameters.labels_to_remove:
                    if self.parameters.labels is not None:
                        if not self.parameters.labels.get(key):
                            if node_spec['Labels'].get(key):
                                node_spec['Labels'].pop(key)
                                changed = True
                        else:
                            # Key listed in both 'labels' and
                            # 'labels_to_remove': 'labels' wins, warn the user.
                            self.client.module.warn(
                                "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
                                "Keeping the assigned label value."
                                % to_native(key))
                    else:
                        if node_spec['Labels'].get(key):
                            node_spec['Labels'].pop(key)
                            changed = True

        if changed is True:
            if not self.check_mode:
                try:
                    self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
                                            node_spec=node_spec)
                except APIError as exc:
                    self.client.fail("Failed to update node : %s" % to_native(exc))
            # Re-inspect so the returned facts reflect the applied update.
            self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
            self.results['changed'] = changed
        else:
            self.results['node'] = node_info
            self.results['changed'] = changed
def main():
    """Module entry point: wire up the swarm client and delegate to SwarmNodeManager."""
    module_args = dict(
        hostname=dict(type='str', required=True),
        labels=dict(type='dict'),
        labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
        labels_to_remove=dict(type='list', elements='str'),
        availability=dict(type='str', choices=['active', 'pause', 'drain']),
        role=dict(type='str', choices=['worker', 'manager']),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=module_args,
        supports_check_mode=True,
        min_docker_version='2.4.0',
    )

    try:
        # SwarmNodeManager fills this dict in place with the outcome.
        outcome = dict(changed=False)
        SwarmNodeManager(client, outcome)
        client.module.exit_json(**outcome)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,160 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_node_info
short_description: Retrieves facts about docker swarm node from Swarm Manager
description:
- Retrieves facts about a docker node.
- Essentially returns the output of C(docker node inspect <name>).
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- The name of the node to inspect.
- The list of nodes names to inspect.
- If empty then return information of all nodes in Swarm cluster.
- When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
- If O(self=true) then this parameter is ignored.
type: list
elements: str
self:
description:
- If V(true), queries the node (that is, the docker daemon) the module communicates with.
- If V(true) then O(name) is ignored.
- If V(false) then query depends on O(name) presence and value.
type: bool
default: false
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get info on all nodes
community.docker.docker_node_info:
register: result
- name: Get info on node
community.docker.docker_node_info:
name: mynode
register: result
- name: Get info on list of nodes
community.docker.docker_node_info:
name:
- mynode1
- mynode2
register: result
- name: Get info on host if it is Swarm Manager
community.docker.docker_node_info:
self: true
register: result
"""
RETURN = r"""
nodes:
description:
- Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
- Can contain multiple entries if more than one node provided in O(name), or O(name) is not provided.
- If O(name) contains a list of nodes, the output will provide information on all nodes registered at the swarm, including
nodes that left the swarm but have not been removed from the cluster on swarm managers and nodes that are unreachable.
returned: always
type: list
elements: dict
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common import (
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
try:
from docker.errors import DockerException
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
def get_node_facts(client):
    """Collect inspect data for the requested swarm node(s).

    Resolution order: O(self=true) inspects only the node the daemon runs on;
    no O(name) means "all nodes in the cluster"; otherwise each named node is
    inspected and silently skipped when missing.
    """
    params = client.module.params

    if params['self'] is True:
        # Only the node the queried docker daemon is running on.
        own_id = client.get_swarm_node_id()
        return [client.get_node_inspect(node_id=own_id)]

    if params['name'] is None:
        # No names given: report every node registered in the swarm.
        return client.get_all_nodes_inspect()

    requested = params['name']
    if not isinstance(requested, list):
        requested = [requested]

    facts = []
    for node_name in requested:
        info = client.get_node_inspect(node_id=node_name, skip_missing=True)
        if info:
            facts.append(info)
    return facts
def main():
    """Module entry point: return node facts when run on a swarm manager."""
    spec = dict(
        name=dict(type='list', elements='str'),
        self=dict(type='bool', default=False),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=spec,
        supports_check_mode=True,
        min_docker_version='2.4.0',
    )

    # Fail fast when the target host is not a swarm manager.
    client.fail_task_if_not_swarm_manager()

    try:
        client.module.exit_json(
            changed=False,
            nodes=get_node_facts(client),
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,395 @@
#!/usr/bin/python
# coding: utf-8
#
# Copyright (c) 2021 Red Hat | Ansible Sakar Mehra<@sakarmehra100@gmail.com | @sakar97>
# Copyright (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_plugin
short_description: Manage Docker plugins
version_added: 1.3.0
description:
- This module allows to install, delete, enable and disable Docker plugins.
- Performs largely the same function as the C(docker plugin) CLI subcommand.
notes:
- The C(--grant-all-permissions) CLI flag is true by default in this module.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: full
options:
plugin_name:
description:
- Name of the plugin to operate on.
required: true
type: str
state:
description:
- V(absent) remove the plugin.
- V(present) install the plugin, if it does not already exist.
- V(enable) enable the plugin.
- V(disable) disable the plugin.
default: present
choices:
- absent
- present
- enable
- disable
type: str
alias:
description:
- Local name for plugin.
type: str
version_added: 1.8.0
plugin_options:
description:
- Dictionary of plugin settings.
type: dict
default: {}
force_remove:
description:
- Remove even if the plugin is enabled.
default: false
type: bool
enable_timeout:
description:
- Timeout in seconds.
type: int
default: 0
author:
- Sakar Mehra (@sakar97)
- Vladimir Porshkevich (@porshkevich)
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Install a plugin
community.docker.docker_plugin:
plugin_name: plugin_one
state: present
- name: Remove a plugin
community.docker.docker_plugin:
plugin_name: plugin_one
state: absent
- name: Enable the plugin
community.docker.docker_plugin:
plugin_name: plugin_one
state: enable
- name: Disable the plugin
community.docker.docker_plugin:
plugin_name: plugin_one
state: disable
- name: Install a plugin with options
community.docker.docker_plugin:
plugin_name: weaveworks/net-plugin:latest_release
plugin_options:
IPALLOC_RANGE: "10.32.0.0/12"
WEAVE_PASSWORD: "PASSWORD"
"""
RETURN = r"""
plugin:
description:
- Plugin inspection results for the affected plugin.
returned: success
type: dict
sample: {}
actions:
description:
- List of actions performed during task execution.
returned: when O(state) is not V(absent)
type: list
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
DifferenceTracker,
)
from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
class TaskParameters(DockerBaseClass):
    """Typed container for the docker_plugin module parameters."""

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        # Pre-declare every expected parameter so the attributes always exist.
        for attribute in ('plugin_name', 'alias', 'plugin_options', 'debug',
                          'force_remove', 'enable_timeout'):
            setattr(self, attribute, None)

        # Overlay the values actually supplied to the module.
        for key, value in client.module.params.items():
            setattr(self, key, value)
def prepare_options(options):
    """Render a plugin settings dict as the list of 'key=value' strings the Docker API expects.

    A value of None is rendered as an empty string; a falsy/missing dict yields [].
    """
    if not options:
        return []
    rendered = []
    for key, value in options.items():
        if value is None:
            value = ""
        rendered.append('%s=%s' % (key, value))
    return rendered
def parse_options(options_list):
    """Invert prepare_options(): turn a list of 'key=value' strings back into a dict.

    Splits on the first '=' only, so values may themselves contain '='.
    A falsy/missing list yields {}.
    """
    parsed = {}
    for entry in (options_list or []):
        key, value = entry.split('=', 1)
        parsed[key] = value
    return parsed
class DockerPluginManager(object):
    """Implements the docker_plugin module states (present/absent/enable/disable).

    Construct-and-run: the handler for the requested state is dispatched from
    ``__init__`` and the outcome is collected through the ``result`` property.
    """

    def __init__(self, client):
        # client: AnsibleDockerClient wrapper around the Docker API connection.
        self.client = client

        self.parameters = TaskParameters(client)
        # The plugin is addressed by its alias when one was given.
        self.preferred_name = self.parameters.alias or self.parameters.plugin_name
        self.check_mode = self.client.check_mode
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.actions = []
        self.changed = False

        self.existing_plugin = self.get_existing_plugin()

        # Dispatch on the requested state.
        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
        elif state == 'enable':
            self.enable()
        elif state == 'disable':
            self.disable()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            # NOTE(review): self.diff (a boolean until here) is deliberately
            # re-bound to the diff dict so ``result`` can expose it as-is.
            self.diff = self.diff_result

    def get_existing_plugin(self):
        """Inspect the plugin by its preferred name; None when it is not installed."""
        try:
            return self.client.get_json('/plugins/{0}/json', self.preferred_name)
        except NotFound:
            return None
        except APIError as e:
            self.client.fail(to_native(e))

    def has_different_config(self):
        """
        Return the list of differences between the current parameters and the existing plugin.

        :return: list of options that differ
        """
        differences = DifferenceTracker()
        if self.parameters.plugin_options:
            settings = self.existing_plugin.get('Settings')
            if not settings:
                differences.add('plugin_options', parameters=self.parameters.plugin_options, active=settings)
            else:
                existing_options = parse_options(settings.get('Env'))
                for key, value in self.parameters.plugin_options.items():
                    # NOTE(review): a falsy desired value always counts as a
                    # difference here, even when it matches the active value —
                    # verify idempotency for empty-string options.
                    if ((not existing_options.get(key) and value) or
                            not value or
                            value != existing_options[key]):
                        differences.add('plugin_options.%s' % key,
                                        parameter=value,
                                        active=existing_options.get(key))
        return differences

    def install_plugin(self):
        """Pull and configure the plugin when it is not installed yet."""
        if not self.existing_plugin:
            if not self.check_mode:
                try:
                    # Get privileges
                    headers = {}
                    registry, repo_name = auth.resolve_repository_name(self.parameters.plugin_name)
                    header = auth.get_config_header(self.client, registry)
                    if header:
                        headers['X-Registry-Auth'] = header
                    privileges = self.client.get_json('/plugins/privileges', params={'remote': self.parameters.plugin_name}, headers=headers)
                    # Pull plugin
                    params = {
                        'remote': self.parameters.plugin_name,
                    }
                    if self.parameters.alias:
                        params['name'] = self.parameters.alias
                    response = self.client._post_json(self.client._url('/plugins/pull'), params=params, headers=headers, data=privileges, stream=True)
                    self.client._raise_for_status(response)
                    # Drain the pull progress stream before continuing.
                    for data in self.client._stream_helper(response, decode=True):
                        pass
                    # Inspect and configure plugin
                    self.existing_plugin = self.client.get_json('/plugins/{0}/json', self.preferred_name)
                    if self.parameters.plugin_options:
                        data = prepare_options(self.parameters.plugin_options)
                        self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
                except APIError as e:
                    self.client.fail(to_native(e))

            self.actions.append("Installed plugin %s" % self.preferred_name)
            self.changed = True

    def remove_plugin(self):
        """Delete the plugin; O(force_remove) removes it even while enabled."""
        force = self.parameters.force_remove
        if self.existing_plugin:
            if not self.check_mode:
                try:
                    self.client.delete_call('/plugins/{0}', self.preferred_name, params={'force': force})
                except APIError as e:
                    self.client.fail(to_native(e))

            self.actions.append("Removed plugin %s" % self.preferred_name)
            self.changed = True

    def update_plugin(self):
        """Push changed plugin_options to an already-installed plugin."""
        if self.existing_plugin:
            differences = self.has_different_config()
            if not differences.empty:
                if not self.check_mode:
                    try:
                        data = prepare_options(self.parameters.plugin_options)
                        self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Updated plugin %s settings" % self.preferred_name)
                self.changed = True
        else:
            self.client.fail("Cannot update the plugin: Plugin does not exist")

    def present(self):
        """state=present: install when missing, otherwise reconcile options."""
        differences = DifferenceTracker()
        if self.existing_plugin:
            differences = self.has_different_config()

        self.diff_tracker.add('exists', parameter=True, active=self.existing_plugin is not None)

        if self.existing_plugin:
            self.update_plugin()
        else:
            self.install_plugin()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_tracker.merge(differences)

        # NOTE(review): actions are dropped from the result unless running in
        # check mode or with debug enabled — confirm this is intentional.
        if not self.check_mode and not self.parameters.debug:
            self.actions = None

    def absent(self):
        """state=absent: remove the plugin when present."""
        self.remove_plugin()

    def enable(self):
        """state=enable: enable the plugin, installing it first when missing."""
        timeout = self.parameters.enable_timeout
        if self.existing_plugin:
            if not self.existing_plugin.get('Enabled'):
                if not self.check_mode:
                    try:
                        self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Enabled plugin %s" % self.preferred_name)
                self.changed = True
        else:
            self.install_plugin()
            if not self.check_mode:
                try:
                    self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
                except APIError as e:
                    self.client.fail(to_native(e))
            self.actions.append("Enabled plugin %s" % self.preferred_name)
            self.changed = True

    def disable(self):
        """state=disable: disable an installed plugin; fail when it is missing."""
        if self.existing_plugin:
            if self.existing_plugin.get('Enabled'):
                if not self.check_mode:
                    try:
                        self.client.post_json('/plugins/{0}/disable', self.preferred_name)
                    except APIError as e:
                        self.client.fail(to_native(e))
                self.actions.append("Disable plugin %s" % self.preferred_name)
                self.changed = True
        else:
            self.client.fail("Plugin not found: Plugin does not exist.")

    @property
    def result(self):
        """Assemble the module result dict; None-valued keys are dropped."""
        plugin_data = {}
        if self.parameters.state != 'absent':
            try:
                plugin_data = self.client.get_json('/plugins/{0}/json', self.preferred_name)
            except NotFound:
                # This can happen in check mode
                pass
        result = {
            'actions': self.actions,
            'changed': self.changed,
            'diff': self.diff,
            'plugin': plugin_data,
        }
        return dict((k, v) for k, v in result.items() if v is not None)
def main():
    """Module entry point: run DockerPluginManager and exit with its result."""
    options = dict(
        alias=dict(type='str'),
        plugin_name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent', 'enable', 'disable']),
        plugin_options=dict(type='dict', default={}),
        debug=dict(type='bool', default=False),
        force_remove=dict(type='bool', default=False),
        enable_timeout=dict(type='int', default=0),
    )

    client = AnsibleDockerClient(
        argument_spec=options,
        supports_check_mode=True,
    )

    try:
        manager = DockerPluginManager(client)
        client.module.exit_json(**manager.result)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,348 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_prune
short_description: Allows to prune various docker objects
description:
- Allows to run C(docker container prune), C(docker image prune), C(docker network prune) and C(docker volume prune) through
the Docker API.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
idempotent:
support: full
options:
containers:
description:
- Whether to prune containers.
type: bool
default: false
containers_filters:
description:
- A dictionary of filter values used for selecting containers to delete.
- 'For example, C(until: 24h).'
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) for
more information on possible filters.
type: dict
images:
description:
- Whether to prune images.
type: bool
default: false
images_filters:
description:
- A dictionary of filter values used for selecting images to delete.
- 'For example, C(dangling: true).'
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) for more
information on possible filters.
type: dict
networks:
description:
- Whether to prune networks.
type: bool
default: false
networks_filters:
description:
- A dictionary of filter values used for selecting networks to delete.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) for
more information on possible filters.
type: dict
volumes:
description:
- Whether to prune volumes.
type: bool
default: false
volumes_filters:
description:
- A dictionary of filter values used for selecting volumes to delete.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) for more
information on possible filters.
type: dict
builder_cache:
description:
- Whether to prune the builder cache.
type: bool
default: false
builder_cache_all:
description:
- Whether to remove all types of build cache.
type: bool
default: false
version_added: 3.10.0
builder_cache_filters:
description:
- A dictionary of filter values used for selecting images to delete.
- 'For example, C(until: 10m).'
- See L(the API documentation,https://docs.docker.com/engine/api/v1.44/#tag/Image/operation/BuildPrune) for more information
on possible filters.
type: dict
version_added: 3.10.0
builder_cache_keep_storage:
description:
- Amount of disk space to keep for cache in format C(<number>[<unit>]).".
- Number is a positive integer. Unit can be one of V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte),
V(T) (tebibyte), or V(P) (pebibyte).
- Omitting the unit defaults to bytes.
type: str
version_added: 3.10.0
author:
- "Felix Fontein (@felixfontein)"
notes:
- The module always returned C(changed=false) before community.docker 3.5.1.
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Prune containers older than 24h
community.docker.docker_prune:
containers: true
containers_filters:
# only consider containers created more than 24 hours ago
until: 24h
- name: Prune containers with labels
community.docker.docker_prune:
containers: true
containers_filters:
# Prune containers whose "foo" label has value "bar", and
# whose "bam" label has value "baz". If you only want to
# compare one label, you can provide it as a string instead
# of a list with one element.
label:
- foo=bar
- bam=baz
# Prune containers whose label "bar" does *not* have value
# "baz". If you want to avoid more than one label, you can
# provide a list of multiple label-value pairs.
"label!": bar=baz
- name: Prune everything
community.docker.docker_prune:
containers: true
images: true
networks: true
volumes: true
builder_cache: true
- name: Prune everything (including non-dangling images)
community.docker.docker_prune:
containers: true
images: true
images_filters:
dangling: false
networks: true
volumes: true
builder_cache: true
"""
RETURN = r"""
# containers
containers:
description:
- List of IDs of deleted containers.
returned: O(containers=true)
type: list
elements: str
sample: []
containers_space_reclaimed:
description:
- Amount of reclaimed disk space from container pruning in bytes.
returned: O(containers=true)
type: int
sample: 0
# images
images:
description:
- List of IDs of deleted images.
returned: O(images=true)
type: list
elements: str
sample: []
images_space_reclaimed:
description:
- Amount of reclaimed disk space from image pruning in bytes.
returned: O(images=true)
type: int
sample: 0
# networks
networks:
description:
- List of IDs of deleted networks.
returned: O(networks=true)
type: list
elements: str
sample: []
# volumes
volumes:
description:
- List of IDs of deleted volumes.
returned: O(volumes=true)
type: list
elements: str
sample: []
volumes_space_reclaimed:
description:
- Amount of reclaimed disk space from volumes pruning in bytes.
returned: O(volumes=true)
type: int
sample: 0
# builder_cache
builder_cache_space_reclaimed:
description:
- Amount of reclaimed disk space from builder cache pruning in bytes.
returned: O(builder_cache=true)
type: int
sample: 0
builder_cache_caches_deleted:
description:
- The build caches that were deleted.
returned: O(builder_cache=true) and API version is 1.39 or later
type: list
elements: str
sample: []
version_added: 3.10.0
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import clean_dict_booleans_for_docker_api
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
def main():
    """Module entry point: prune the selected Docker object types and report results."""
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        builder_cache=dict(type='bool', default=False),
        builder_cache_all=dict(type='bool', default=False),
        builder_cache_filters=dict(type='dict'),
        builder_cache_keep_storage=dict(type='str'),  # converted to bytes below
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        option_minimal_versions=dict(
            builder_cache=dict(docker_py_version='1.31'),
            builder_cache_all=dict(docker_py_version='1.39'),
            builder_cache_filters=dict(docker_py_version='1.31'),
            builder_cache_keep_storage=dict(docker_py_version='1.39'),
        ),
        # supports_check_mode=True,
    )

    module_params = client.module.params

    # Parse the human-readable size (e.g. "10G") up front so a bad value
    # fails before any pruning happens.
    keep_storage = None
    if module_params.get('builder_cache_keep_storage') is not None:
        try:
            keep_storage = human_to_bytes(module_params.get('builder_cache_keep_storage'))
        except ValueError as exc:
            client.module.fail_json(msg='Error while parsing value of builder_cache_keep_storage: {0}'.format(exc))

    def prune_query(filters_option):
        # Build the query parameters for a prune endpoint from user filters.
        cleaned = clean_dict_booleans_for_docker_api(module_params.get(filters_option), allow_sequences=True)
        return {'filters': convert_filters(cleaned)}

    try:
        result = dict()
        changed = False

        if module_params['containers']:
            response = client.post_to_json('/containers/prune', params=prune_query('containers_filters'))
            result['containers'] = response.get('ContainersDeleted') or []
            result['containers_space_reclaimed'] = response['SpaceReclaimed']
            if result['containers'] or result['containers_space_reclaimed']:
                changed = True

        if module_params['images']:
            response = client.post_to_json('/images/prune', params=prune_query('images_filters'))
            result['images'] = response.get('ImagesDeleted') or []
            result['images_space_reclaimed'] = response['SpaceReclaimed']
            if result['images'] or result['images_space_reclaimed']:
                changed = True

        if module_params['networks']:
            # The network prune endpoint does not report reclaimed space.
            response = client.post_to_json('/networks/prune', params=prune_query('networks_filters'))
            result['networks'] = response.get('NetworksDeleted') or []
            if result['networks']:
                changed = True

        if module_params['volumes']:
            response = client.post_to_json('/volumes/prune', params=prune_query('volumes_filters'))
            result['volumes'] = response.get('VolumesDeleted') or []
            result['volumes_space_reclaimed'] = response['SpaceReclaimed']
            if result['volumes'] or result['volumes_space_reclaimed']:
                changed = True

        if module_params['builder_cache']:
            query = prune_query('builder_cache_filters')
            if module_params.get('builder_cache_all'):
                query['all'] = 'true'
            if keep_storage is not None:
                query['keep-storage'] = keep_storage
            response = client.post_to_json('/build/prune', params=query)
            result['builder_cache_space_reclaimed'] = response['SpaceReclaimed']
            if result['builder_cache_space_reclaimed']:
                changed = True
            if 'CachesDeleted' in response:
                # API version 1.39+: return value CachesDeleted (list of str)
                result['builder_cache_caches_deleted'] = response['CachesDeleted']
                if result['builder_cache_caches_deleted']:
                    changed = True

        result['changed'] = changed
        client.module.exit_json(**result)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,408 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_secret
short_description: Manage docker secrets
description:
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
- Adds to the metadata of new secrets C(ansible_key), an encrypted hash representation of the data, which is then used in
future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated unless
the O(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_2_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: none
idempotent:
support: partial
details:
- If O(force=true) the module is not idempotent.
options:
data:
description:
- The value of the secret.
- Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present).
type: str
data_is_b64:
description:
- If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used.
- To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option.
type: bool
default: false
data_src:
description:
- The file on the target from which to read the secret.
- Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present).
type: path
version_added: 1.10.0
labels:
description:
- A map of key:value meta data, where both key and value are expected to be strings.
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating
it again.
type: dict
force:
description:
- Use with O(state=present) to always remove and recreate an existing secret.
- If V(true), an existing secret will be replaced, even if it has not changed.
type: bool
default: false
rolling_versions:
description:
- If set to V(true), secrets are created with an increasing version number appended to their name.
- Adds a label containing the version number to the managed secrets with the name C(ansible_version).
type: bool
default: false
version_added: 2.2.0
versions_to_keep:
description:
- When using O(rolling_versions), the number of old versions of the secret to keep.
- Extraneous old secrets are deleted after the new one is created.
- Set to V(-1) to keep everything or to V(0) or V(1) to keep only the current one.
type: int
default: 5
version_added: 2.2.0
name:
description:
- The name of the secret.
type: str
required: true
state:
description:
- Set to V(present), if the secret should exist, and V(absent), if it should not.
type: str
default: present
choices:
- absent
- present
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
"""
EXAMPLES = r"""
- name: Create secret foo (from a file on the control machine)
community.docker.docker_secret:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
data_is_b64: true
state: present
- name: Create secret foo (from a file on the target machine)
community.docker.docker_secret:
name: foo
data_src: /path/to/secret/file
state: present
- name: Change the secret data
community.docker.docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
community.docker.docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
community.docker.docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
community.docker.docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
community.docker.docker_secret:
name: foo
data: Goodnight everyone!
force: true
state: present
- name: Remove secret foo
community.docker.docker_secret:
name: foo
state: absent
"""
RETURN = r"""
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success and O(state=present)
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
secret_name:
description:
- The name of the created secret object.
returned: success and O(state=present)
type: str
sample: 'awesome_secret'
version_added: 2.2.0
"""
import base64
import hashlib
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils.common import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
compare_generic,
sanitize_labels,
)
from ansible.module_utils.common.text.converters import to_native, to_bytes
class SecretManager(DockerBaseClass):
    """Create, update, or remove a Docker Swarm secret according to module parameters.

    All configuration is read from ``client.module.params``; the outcome
    (``changed``, ``secret_id``, ``secret_name``) is written into ``results``.
    Because secret data cannot be read back from Docker, idempotency is
    implemented via an ``ansible_key`` label holding a hash of the data.
    """
    def __init__(self, client, results):
        super(SecretManager, self).__init__()
        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params
        self.name = parameters.get('name')
        self.state = parameters.get('state')
        self.data = parameters.get('data')
        if self.data is not None:
            # Normalize the secret payload to bytes: either decode the
            # user-supplied Base64 text, or encode the plain string.
            if parameters.get('data_is_b64'):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)
        data_src = parameters.get('data_src')
        if data_src is not None:
            # data and data_src are declared mutually exclusive in main(),
            # so this cannot silently override user-supplied data.
            try:
                with open(data_src, 'rb') as f:
                    self.data = f.read()
            except Exception as exc:
                self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
        self.labels = parameters.get('labels')
        self.force = parameters.get('force')
        self.rolling_versions = parameters.get('rolling_versions')
        self.versions_to_keep = parameters.get('versions_to_keep')
        if self.rolling_versions:
            # Current version number; updated from the newest existing secret in present().
            self.version = 0
        # SHA-224 hex digest of the data; computed in __call__ for state=present.
        self.data_key = None
        # Existing secrets matching the name (sorted oldest-first when rolling).
        self.secrets = []
    def __call__(self):
        """Entry point: look up existing secrets, then dispatch on requested state."""
        self.get_secret()
        if self.state == 'present':
            # The digest is stored in the 'ansible_key' label so later runs can
            # detect data changes without being able to read the secret back.
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()
            self.remove_old_versions()
        elif self.state == 'absent':
            self.absent()
    def get_version(self, secret):
        """Return the integer 'ansible_version' label of *secret*, or 0 if missing/invalid."""
        try:
            return int(secret.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
        except ValueError:
            return 0
    def remove_old_versions(self):
        """Prune old secret versions beyond versions_to_keep (rolling_versions only)."""
        if not self.rolling_versions or self.versions_to_keep < 0:
            return
        if not self.check_mode:
            # Always keep at least the current version, even if versions_to_keep is 0.
            while len(self.secrets) > max(self.versions_to_keep, 1):
                self.remove_secret(self.secrets.pop(0))
    def get_secret(self):
        ''' Find an existing secret. '''
        try:
            secrets = self.client.secrets(filters={'name': self.name})
        except APIError as exc:
            self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
        if self.rolling_versions:
            # The Docker name filter matches substrings; keep only '<name>_v<N>'
            # entries and sort by version so the newest ends up last.
            self.secrets = [
                secret
                for secret in secrets
                if secret['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
            ]
            self.secrets.sort(key=self.get_version)
        else:
            self.secrets = [
                secret for secret in secrets if secret['Spec']['Name'] == self.name
            ]
    def create_secret(self):
        ''' Create a new secret '''
        secret_id = None
        # We cannot see the data after creation, so adding a label we can use for idempotency check
        labels = {
            'ansible_key': self.data_key
        }
        if self.rolling_versions:
            self.version += 1
            labels['ansible_version'] = str(self.version)
            self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
        if self.labels:
            labels.update(self.labels)
        try:
            if not self.check_mode:
                secret_id = self.client.create_secret(self.name, self.data, labels=labels)
                self.secrets += self.client.secrets(filters={'id': secret_id})
        except APIError as exc:
            self.client.fail("Error creating secret: %s" % to_native(exc))
        if isinstance(secret_id, dict):
            # Some SDK versions return {'ID': ...} instead of a plain ID string.
            secret_id = secret_id['ID']
        return secret_id
    def remove_secret(self, secret):
        """Remove *secret* (an inspect-style dict) unless running in check mode."""
        try:
            if not self.check_mode:
                self.client.remove_secret(secret['ID'])
        except APIError as exc:
            self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc)))
    def present(self):
        ''' Handles state == 'present', creating or updating the secret '''
        if self.secrets:
            # Compare against the newest existing secret.
            secret = self.secrets[-1]
            self.results['secret_id'] = secret['ID']
            self.results['secret_name'] = secret['Spec']['Name']
            data_changed = False
            attrs = secret.get('Spec', {})
            if attrs.get('Labels', {}).get('ansible_key'):
                if attrs['Labels']['ansible_key'] != self.data_key:
                    data_changed = True
            else:
                if not self.force:
                    self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'true'")
            # 'allow_more_present': extra labels on the existing secret do not count as a change.
            labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
            if self.rolling_versions:
                self.version = self.get_version(secret)
            if data_changed or labels_changed or self.force:
                # if something changed or force, delete and re-create the secret
                if not self.rolling_versions:
                    self.absent()
                secret_id = self.create_secret()
                self.results['changed'] = True
                self.results['secret_id'] = secret_id
                self.results['secret_name'] = self.name
        else:
            self.results['changed'] = True
            self.results['secret_id'] = self.create_secret()
            self.results['secret_name'] = self.name
    def absent(self):
        ''' Handles state == 'absent', removing the secret '''
        if self.secrets:
            for secret in self.secrets:
                self.remove_secret(secret)
            self.results['changed'] = True
def main():
    """Ansible entry point: build the Docker client and drive SecretManager.

    Exits via ``exit_json`` with ``changed``/``secret_id``/``secret_name``,
    or via ``client.fail`` on Docker/requests errors.
    """
    argument_spec = {
        'name': dict(type='str', required=True),
        'state': dict(type='str', default='present', choices=['absent', 'present']),
        'data': dict(type='str', no_log=True),
        'data_is_b64': dict(type='bool', default=False),
        'data_src': dict(type='path'),
        'labels': dict(type='dict'),
        'force': dict(type='bool', default=False),
        'rolling_versions': dict(type='bool', default=False),
        'versions_to_keep': dict(type='int', default=5),
    }
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Exactly one of data/data_src must be given when the secret should exist.
        required_if=[('state', 'present', ['data', 'data_src'], True)],
        mutually_exclusive=[('data', 'data_src')],
        min_docker_version='2.1.0',
    )
    sanitize_labels(client.module.params['labels'], 'labels', client)
    try:
        results = {
            'changed': False,
            'secret_id': '',
            'secret_name': '',
        }
        SecretManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,348 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_stack
author: "Dario Zanzico (@dariko)"
short_description: docker stack module
description:
- Manage docker stacks using the C(docker stack) command on the target node (see examples).
extends_documentation_fragment:
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: none
diff_mode:
support: none
action_group:
version_added: 3.6.0
idempotent:
support: full
options:
name:
description:
- Stack name.
type: str
required: true
state:
description:
- Service state.
type: str
default: "present"
choices:
- present
- absent
compose:
description:
- List of compose definitions. Any element may be a string referring to the path of the compose file on the target host
or the YAML contents of a compose file nested as dictionary.
type: list
elements: raw
default: []
prune:
description:
- If true will add the C(--prune) option to the C(docker stack deploy) command. This will have docker remove the services
not present in the current stack definition.
type: bool
default: false
detach:
description:
- If V(false), the C(--detach=false) option is added to the C(docker stack deploy) command, allowing Docker to wait
for tasks to converge before exiting.
- If V(true) (default), Docker exits immediately instead of waiting for tasks to converge.
type: bool
default: true
version_added: 4.1.0
with_registry_auth:
description:
- If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command. This will have docker send
registry authentication details to Swarm agents.
type: bool
default: false
resolve_image:
description:
- If set will add the C(--resolve-image) option to the C(docker stack deploy) command. This will have docker query the
registry to resolve image digest and supported platforms. If not set, docker use "always" by default.
type: str
choices: ["always", "changed", "never"]
absent_retries:
description:
- If larger than V(0) and O(state=absent) the module will retry up to O(absent_retries) times to delete the stack until
all the resources have been effectively deleted. If the last try still reports the stack as not completely removed
the module will fail.
type: int
default: 0
absent_retries_interval:
description:
- Interval in seconds between consecutive O(absent_retries).
type: int
default: 1
docker_cli:
version_added: 3.6.0
docker_host:
version_added: 3.6.0
tls_hostname:
version_added: 3.6.0
api_version:
version_added: 3.6.0
ca_path:
version_added: 3.6.0
client_cert:
version_added: 3.6.0
client_key:
version_added: 3.6.0
tls:
version_added: 3.6.0
validate_certs:
version_added: 3.6.0
cli_context:
version_added: 3.6.0
requirements:
- Docker CLI tool C(docker)
- jsondiff
- pyyaml
"""
RETURN = r"""
stack_spec_diff:
description: |-
Dictionary containing the differences between the 'Spec' field
of the stack services before and after applying the new stack
definition.
sample: >
"stack_spec_diff":
{'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
returned: on change
type: dict
"""
EXAMPLES = r"""
- name: Deploy stack from a compose file
community.docker.docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- name: Deploy stack from base compose file and override the web service
community.docker.docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- version: '3'
services:
web:
image: nginx:latest
environment:
ENVVAR: envvar
- name: Remove stack
community.docker.docker_stack:
name: mystack
state: absent
"""
import json
import os
import tempfile
import traceback
from ansible.module_utils.six import string_types
from time import sleep
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
try:
from jsondiff import diff as json_diff
HAS_JSONDIFF = True
except ImportError:
HAS_JSONDIFF = False
try:
from yaml import dump as yaml_dump
HAS_YAML = True
except ImportError:
HAS_YAML = False
def docker_stack_services(client, stack_name):
    """Return the list of service names in *stack_name* ([] if none).

    The CLI reports a missing stack on stderr (with rc possibly non-zero),
    so stderr is checked instead of the return code.
    """
    rc, out, err = client.call_cli("stack", "services", stack_name, "--format", "{{.Name}}")
    if to_native(err) == "Nothing found in stack: %s\n" % stack_name:
        return []
    names = to_native(out).strip()
    # ''.split('\n') yields [''], not [] -- guard against reporting a single
    # empty service name (callers use truthiness of this list, e.g. to decide
    # whether 'docker stack rm' needs to run at all).
    return names.split('\n') if names else []
def docker_service_inspect(client, service_name):
    """Return the 'Spec' dict of *service_name*, or None if inspection fails."""
    rc, out, err = client.call_cli("service", "inspect", service_name)
    if rc != 0:
        return None
    # 'docker service inspect' prints a JSON array with one entry per service.
    return json.loads(out)[0]['Spec']
def docker_stack_deploy(client, stack_name, compose_files):
    """Run 'docker stack deploy' for *stack_name* and return (rc, stdout, stderr) as text.

    Optional flags are derived from the module parameters; each entry of
    *compose_files* becomes one --compose-file argument.
    """
    params = client.module.params
    args = ["stack", "deploy"]
    if params["prune"]:
        args.append("--prune")
    if not params["detach"]:
        args.append("--detach=false")
    if params["with_registry_auth"]:
        args.append("--with-registry-auth")
    if params["resolve_image"]:
        args.extend(["--resolve-image", params["resolve_image"]])
    for compose_file in compose_files:
        args.extend(["--compose-file", compose_file])
    args.append(stack_name)
    rc, out, err = client.call_cli(*args)
    return rc, to_native(out), to_native(err)
def docker_stack_inspect(client, stack_name):
    """Map each service name of *stack_name* to its inspected 'Spec' (None on failure)."""
    return {
        service: docker_service_inspect(client, service)
        for service in docker_stack_services(client, stack_name)
    }
def docker_stack_rm(client, stack_name, retries, interval):
    """Run 'docker stack rm', retrying up to *retries* times until the stack is gone.

    Waits *interval* seconds between attempts and returns (rc, stdout, stderr)
    of the last attempt, as native text.
    """
    args = ["stack", "rm", stack_name]
    if not client.module.params["detach"]:
        args.append("--detach=false")
    # The stack is fully removed once the CLI reports it can no longer find it.
    gone_marker = "Nothing found in stack: %s\n" % stack_name
    rc, out, err = client.call_cli(*args)
    while retries > 0 and to_native(err) != gone_marker:
        sleep(interval)
        retries -= 1
        rc, out, err = client.call_cli(*args)
    return rc, to_native(out), to_native(err)
def main():
    """Entry point for the docker_stack module.

    state=present: deploy/update the stack from the given compose definitions
    and report a diff of the service specs (ignoring UpdatedAt/Version noise).
    state=absent: remove the stack, optionally retrying until all resources
    are gone.
    """
    client = AnsibleModuleDockerClient(
        argument_spec={
            'name': dict(type='str', required=True),
            'compose': dict(type='list', elements='raw', default=[]),
            'prune': dict(type='bool', default=False),
            'detach': dict(type='bool', default=True),
            'with_registry_auth': dict(type='bool', default=False),
            'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
            'state': dict(type='str', default='present', choices=['present', 'absent']),
            'absent_retries': dict(type='int', default=0),
            'absent_retries_interval': dict(type='int', default=1)
        },
        supports_check_mode=False,
    )
    if not HAS_JSONDIFF:
        return client.fail("jsondiff is not installed, try 'pip install jsondiff'")
    if not HAS_YAML:
        return client.fail("yaml is not installed, try 'pip install pyyaml'")
    try:
        state = client.module.params['state']
        compose = client.module.params['compose']
        name = client.module.params['name']
        absent_retries = client.module.params['absent_retries']
        absent_retries_interval = client.module.params['absent_retries_interval']
        if state == 'present':
            if not compose:
                client.fail("compose parameter must be a list containing at least one element")
            compose_files = []
            for i, compose_def in enumerate(compose):
                if isinstance(compose_def, dict):
                    # Inline compose definitions are written to a temp file that
                    # the module framework removes on exit (add_cleanup_file).
                    compose_file_fd, compose_file = tempfile.mkstemp()
                    client.module.add_cleanup_file(compose_file)
                    with os.fdopen(compose_file_fd, 'w') as stack_file:
                        compose_files.append(compose_file)
                        stack_file.write(yaml_dump(compose_def))
                elif isinstance(compose_def, string_types):
                    compose_files.append(compose_def)
                else:
                    client.fail("compose element '%s' must be a string or a dictionary" % compose_def)
            before_stack_services = docker_stack_inspect(client, name)
            rc, out, err = docker_stack_deploy(client, name, compose_files)
            # Fail right after deploy: inspecting the stack again is only
            # meaningful (and worth the extra CLI calls) when deploy succeeded.
            if rc != 0:
                client.fail("docker stack up deploy command failed", rc=rc, stdout=out, stderr=err)
            after_stack_services = docker_stack_inspect(client, name)
            before_after_differences = json_diff(before_stack_services, after_stack_services)
            # Iterate over a snapshot of the keys: entries may be popped below,
            # and mutating a dict while iterating its view raises RuntimeError.
            for k in list(before_after_differences.keys()):
                if isinstance(before_after_differences[k], dict):
                    # UpdatedAt/Version change on every deploy; drop them so an
                    # otherwise-unchanged stack is reported as changed=False.
                    before_after_differences[k].pop('UpdatedAt', None)
                    before_after_differences[k].pop('Version', None)
                    if not list(before_after_differences[k].keys()):
                        before_after_differences.pop(k)
            if not before_after_differences:
                client.module.exit_json(
                    changed=False,
                    rc=rc,
                    stdout=out,
                    stderr=err,
                )
            else:
                client.module.exit_json(
                    changed=True,
                    rc=rc,
                    stdout=out,
                    stderr=err,
                    stack_spec_diff=json_diff(
                        before_stack_services,
                        after_stack_services,
                        dump=True,
                    ),
                )
        else:
            if docker_stack_services(client, name):
                rc, out, err = docker_stack_rm(client, name, absent_retries, absent_retries_interval)
                if rc != 0:
                    client.module.fail_json(
                        msg="'docker stack down' command failed",
                        rc=rc,
                        stdout=out,
                        stderr=err,
                    )
                else:
                    client.module.exit_json(
                        changed=True,
                        msg=out,
                        rc=rc,
                        stdout=out,
                        stderr=err,
                    )
            client.module.exit_json(changed=False)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,118 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_stack_info
author: "Jose Angel Munoz (@imjoseangel)"
short_description: Return information on all docker stacks
description:
- Retrieve information on docker stacks using the C(docker stack) command on the target node (see examples).
requirements:
- Docker CLI tool C(docker)
extends_documentation_fragment:
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
attributes:
action_group:
version_added: 3.6.0
options:
docker_cli:
version_added: 3.6.0
docker_host:
version_added: 3.6.0
tls_hostname:
version_added: 3.6.0
api_version:
version_added: 3.6.0
ca_path:
version_added: 3.6.0
client_cert:
version_added: 3.6.0
client_key:
version_added: 3.6.0
tls:
version_added: 3.6.0
validate_certs:
version_added: 3.6.0
cli_context:
version_added: 3.6.0
seealso:
- module: community.docker.docker_stack_task_info
description: >-
To retrieve detailed information about the services under a specific stack use the M(community.docker.docker_stack_task_info)
module.
"""
RETURN = r"""
results:
description:
- List of dictionaries containing the list of stacks on the target node.
sample:
- {"name": "grafana", "namespace": "default", "orchestrator": "Kubernetes", "services": "2"}
returned: always
type: list
elements: dict
"""
EXAMPLES = r"""
- name: Shows stack info
community.docker.docker_stack_info:
register: result
- name: Show results
ansible.builtin.debug:
var: result.results
"""
import json
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
def docker_stack_list(module):
    """Run 'docker stack ls' and return (rc, stdout, stderr) with output stripped.

    NOTE(review): this helper appears unused within this module -- main() goes
    through client.call_cli_json_stream() instead; confirm before removing.
    """
    docker_bin = module.get_bin_path('docker', required=True)
    cmd = [docker_bin, "stack", "ls", "--format={{json .}}"]
    rc, out, err = module.run_command(cmd)
    return rc, out.strip(), err.strip()
def main():
    """List all Docker stacks on the target node and return them in ``results``."""
    client = AnsibleModuleDockerClient(
        argument_spec={},
        supports_check_mode=True,
    )
    try:
        rc, stacks, stderr = client.call_cli_json_stream('stack', 'ls', '--format={{json .}}', check_rc=True)
        # Reconstruct stdout from the parsed JSON stream, one object per line.
        stdout = '\n'.join(json.dumps(entry) for entry in stacks)
        client.module.exit_json(
            changed=False,
            rc=rc,
            stdout=stdout,
            stderr=to_native(stderr).strip(),
            results=stacks,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,122 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_stack_task_info
author: "Jose Angel Munoz (@imjoseangel)"
short_description: Return information of the tasks on a docker stack
description:
- Retrieve information on docker stacks tasks using the C(docker stack) command on the target node (see examples).
extends_documentation_fragment:
- community.docker.docker.cli_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
attributes:
action_group:
version_added: 3.6.0
options:
name:
description:
- Stack name.
type: str
required: true
docker_cli:
version_added: 3.6.0
docker_host:
version_added: 3.6.0
tls_hostname:
version_added: 3.6.0
api_version:
version_added: 3.6.0
ca_path:
version_added: 3.6.0
client_cert:
version_added: 3.6.0
client_key:
version_added: 3.6.0
tls:
version_added: 3.6.0
validate_certs:
version_added: 3.6.0
cli_context:
version_added: 3.6.0
requirements:
- Docker CLI tool C(docker)
"""
RETURN = r"""
results:
description:
- List of dictionaries containing the list of tasks associated to a stack name.
sample:
- {"CurrentState": "Running", "DesiredState": "Running", "Error": "", "ID": "7wqv6m02ugkw", "Image": "busybox", "Name": "test_stack.1",
"Node": "swarm", "Ports": ""}
returned: always
type: list
elements: dict
"""
EXAMPLES = r"""
- name: Shows stack info
community.docker.docker_stack_task_info:
name: test_stack
register: result
- name: Show results
ansible.builtin.debug:
var: result.results
"""
import json
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_cli import (
AnsibleModuleDockerClient,
DockerException,
)
def docker_stack_task(module, stack_name):
    """Run 'docker stack ps <stack_name>' and return (rc, stdout, stderr) stripped.

    NOTE(review): this helper appears unused within this module -- main() goes
    through client.call_cli_json_stream() instead; confirm before removing.
    """
    docker_bin = module.get_bin_path('docker', required=True)
    cmd = [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"]
    rc, out, err = module.run_command(cmd)
    return rc, out.strip(), err.strip()
def main():
    """Return the tasks of one Docker stack (by name) in ``results``."""
    client = AnsibleModuleDockerClient(
        argument_spec={
            'name': dict(type='str', required=True)
        },
        supports_check_mode=True,
    )
    try:
        stack_name = client.module.params['name']
        rc, tasks, stderr = client.call_cli_json_stream('stack', 'ps', stack_name, '--format={{json .}}', check_rc=True)
        # Reconstruct stdout from the parsed JSON stream, one object per line.
        stdout = '\n'.join(json.dumps(entry) for entry in tasks)
        client.module.exit_json(
            changed=False,
            rc=rc,
            stdout=stdout,
            stderr=to_native(stderr).strip(),
            results=tasks,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,718 @@
#!/usr/bin/python
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_swarm
short_description: Manage Swarm cluster
description:
- Create a new Swarm cluster.
- Add/Remove nodes or managers to an existing cluster.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: full
options:
advertise_addr:
description:
- Externally reachable address advertised to other nodes.
- This can either be an address/port combination in the form V(192.168.1.1:4567), or an interface followed by a port
number, like V(eth0:4567).
- If the port number is omitted, the port number from the listen address is used.
- If O(advertise_addr) is not specified, it will be automatically detected when possible.
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
type: str
default_addr_pool:
description:
- Default address pool in CIDR format.
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
- Requires API version >= 1.39.
type: list
elements: str
subnet_size:
description:
- Default address pool subnet mask length.
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
- Requires API version >= 1.39.
type: int
listen_addr:
description:
- Listen address used for inter-manager communication.
- This can either be an address/port combination in the form V(192.168.1.1:4567), or an interface followed by a port
number, like V(eth0:4567).
- If the port number is omitted, the default swarm listening port is used.
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
type: str
default: 0.0.0.0:2377
force:
description:
- Use with state V(present) to force creating a new Swarm, even if already part of one.
- Use with state V(absent) to Leave the swarm even if this node is a manager.
type: bool
default: false
state:
description:
- Set to V(present), to create/update a new cluster.
- Set to V(join), to join an existing cluster.
- Set to V(absent), to leave an existing cluster.
- Set to V(remove), to remove an absent node from the cluster. Note that removing requires Docker SDK for Python >=
2.4.0.
- M(community.docker.docker_node) can be used to demote a manager before removal.
type: str
default: present
choices:
- present
- join
- absent
- remove
node_id:
description:
- Swarm id of the node to remove.
- Used with O(state=remove).
type: str
join_token:
description:
- Swarm token used to join a swarm cluster.
- Used with O(state=join).
- If this value is specified, the corresponding value in the return values will be censored by Ansible. This is a side-effect
of this value not being logged.
type: str
remote_addrs:
description:
- Remote address of one or more manager nodes of an existing Swarm to connect to.
- Used with O(state=join).
type: list
elements: str
task_history_retention_limit:
description:
- Maximum number of tasks history stored.
- Docker default value is V(5).
type: int
snapshot_interval:
description:
- Number of logs entries between snapshot.
- Docker default value is V(10000).
type: int
keep_old_snapshots:
description:
- Number of snapshots to keep beyond the current snapshot.
- Docker default value is V(0).
type: int
log_entries_for_slow_followers:
description:
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
type: int
heartbeat_tick:
description:
- Amount of ticks (in seconds) between each heartbeat.
- Docker default value is V(1) seconds.
type: int
election_tick:
description:
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
- Docker default value is V(10) seconds.
type: int
dispatcher_heartbeat_period:
description:
- The delay (in nanoseconds) for an agent to send a heartbeat to the dispatcher.
- Docker default value is 5 seconds, which corresponds to a value of V(5000000000).
type: int
node_cert_expiry:
description:
- Automatic expiry for nodes certificates, given in nanoseconds.
- Docker default value is 90 days, which corresponds to a value of V(7776000000000000).
type: int
name:
description:
- The name of the swarm.
type: str
labels:
description:
- User-defined key/value metadata.
- Label operations in this module apply to the docker swarm cluster. Use M(community.docker.docker_node) module to add/modify/remove
swarm node labels.
- Requires API version >= 1.32.
type: dict
signing_ca_cert:
description:
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
- This must not be a path to a certificate, but the contents of the certificate.
- Requires API version >= 1.30.
type: str
signing_ca_key:
description:
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
- This must not be a path to a key, but the contents of the key.
- Requires API version >= 1.30.
type: str
ca_force_rotate:
description:
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified.
- Docker default value is V(0).
- Requires API version >= 1.30.
type: int
autolock_managers:
description:
- If set, generate a key and use it to lock data stored on the managers.
- Docker default value is V(false).
- M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
type: bool
rotate_worker_token:
description: Rotate the worker join token.
type: bool
default: false
rotate_manager_token:
description: Rotate the manager join token.
type: bool
default: false
data_path_addr:
description:
- Address or interface to use for data path traffic.
- This can either be an address in the form V(192.168.1.1), or an interface, like V(eth0).
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
- Requires API version >= 1.30.
type: str
version_added: 2.5.0
data_path_port:
description:
- Port to use for data path traffic.
- This needs to be a port number like V(9789).
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
- Requires API version >= 1.40.
type: int
version_added: 3.1.0
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
- Docker API >= 1.25
author:
- Thierry Bouvet (@tbouvet)
- Piotr Wojciechowski (@WojciechowskiPiotr)
"""
EXAMPLES = r"""
- name: Init a new swarm with default parameters
community.docker.docker_swarm:
state: present
- name: Update swarm configuration
community.docker.docker_swarm:
state: present
election_tick: 5
- name: Add nodes
community.docker.docker_swarm:
state: join
advertise_addr: 192.168.1.2
join_token: SWMTKN-1--xxxxx
remote_addrs: ['192.168.1.1:2377']
- name: Leave swarm for a node
community.docker.docker_swarm:
state: absent
- name: Remove a swarm manager
community.docker.docker_swarm:
state: absent
force: true
- name: Remove node from swarm
community.docker.docker_swarm:
state: remove
node_id: mynode
- name: Init a new swarm with different data path interface
community.docker.docker_swarm:
state: present
advertise_addr: eth0
data_path_addr: ens10
- name: Init a new swarm with a different data path port
community.docker.docker_swarm:
state: present
data_path_port: 9789
"""
RETURN = r"""
swarm_facts:
description: Information about swarm.
returned: success
type: dict
contains:
JoinTokens:
description: Tokens to connect to the Swarm.
returned: success
type: dict
contains:
Worker:
description:
- Token to join the cluster as a new *worker* node.
- B(Note:) if this value has been specified as O(join_token), the value here will not be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
If you pass O(join_token), make sure your playbook/role does not depend on this return value!
returned: success
type: str
example: SWMTKN-1--xxxxx
Manager:
description:
- Token to join the cluster as a new *manager* node.
- B(Note:) if this value has been specified as O(join_token), the value here will not be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
If you pass O(join_token), make sure your playbook/role does not depend on this return value!
returned: success
type: str
example: SWMTKN-1--xxxxx
UnlockKey:
description: The swarm unlock-key if O(autolock_managers=true).
returned: on success if O(autolock_managers=true) and swarm is initialised, or if O(autolock_managers) has changed.
type: str
example: SWMKEY-1-xxx
actions:
description: Provides the actions done on the swarm.
returned: when action failed.
type: list
elements: str
example: ['This cluster is already a swarm cluster']
"""
import json
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils.common import (
DockerBaseClass,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DifferenceTracker,
sanitize_labels,
)
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
from ansible.module_utils.common.text.converters import to_native
class TaskParameters(DockerBaseClass):
    """Holder for the docker_swarm module parameters.

    Node-level options (addresses, tokens) sit next to the swarm-spec
    options; the computed spec is stored on ``self.spec`` by
    :meth:`update_parameters`.
    """
    def __init__(self):
        super(TaskParameters, self).__init__()
        # Node-level options; these never become part of the swarm spec.
        self.advertise_addr = None
        self.listen_addr = None
        self.remote_addrs = None
        self.join_token = None
        self.data_path_addr = None
        self.data_path_port = None
        # Spec options (translated into the swarm spec sent to the API).
        self.snapshot_interval = None
        self.task_history_retention_limit = None
        self.keep_old_snapshots = None
        self.log_entries_for_slow_followers = None
        self.heartbeat_tick = None
        self.election_tick = None
        self.dispatcher_heartbeat_period = None
        self.node_cert_expiry = None
        self.name = None
        self.labels = None
        self.log_driver = None
        self.signing_ca_cert = None
        self.signing_ca_key = None
        self.ca_force_rotate = None
        self.autolock_managers = None
        self.rotate_worker_token = None
        self.rotate_manager_token = None
        self.default_addr_pool = None
        self.subnet_size = None

    @staticmethod
    def from_ansible_params(client):
        """Build a TaskParameters from the Ansible module parameters.

        Only parameters whose names match an attribute declared in
        ``__init__`` are copied; the swarm spec is computed afterwards
        via :meth:`update_parameters`.
        """
        result = TaskParameters()
        for key, value in client.module.params.items():
            if key in result.__dict__:
                setattr(result, key, value)
        result.update_parameters(client)
        return result

    def update_from_swarm_info(self, swarm_info):
        """Fill parameters the user left unset from the active swarm state.

        ``swarm_info`` is the inspect result of the running swarm; each
        option below is only taken over while the corresponding attribute
        is still ``None``.
        """
        spec = swarm_info['Spec']
        ca_config = spec.get('CAConfig') or dict()
        if self.node_cert_expiry is None:
            self.node_cert_expiry = ca_config.get('NodeCertExpiry')
        if self.ca_force_rotate is None:
            self.ca_force_rotate = ca_config.get('ForceRotate')
        dispatcher = spec.get('Dispatcher') or dict()
        if self.dispatcher_heartbeat_period is None:
            self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
        raft = spec.get('Raft') or dict()
        if self.snapshot_interval is None:
            self.snapshot_interval = raft.get('SnapshotInterval')
        if self.keep_old_snapshots is None:
            self.keep_old_snapshots = raft.get('KeepOldSnapshots')
        if self.heartbeat_tick is None:
            self.heartbeat_tick = raft.get('HeartbeatTick')
        if self.log_entries_for_slow_followers is None:
            self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
        if self.election_tick is None:
            self.election_tick = raft.get('ElectionTick')
        orchestration = spec.get('Orchestration') or dict()
        if self.task_history_retention_limit is None:
            self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
        encryption_config = spec.get('EncryptionConfig') or dict()
        if self.autolock_managers is None:
            self.autolock_managers = encryption_config.get('AutoLockManagers')
        if self.name is None:
            self.name = spec['Name']
        if self.labels is None:
            self.labels = spec.get('Labels') or {}
        # NOTE(review): unlike the options above, log_driver is taken over
        # unconditionally and thus overwrites a user-supplied value here;
        # verify this is intended before changing it.
        if 'LogDriver' in spec['TaskDefaults']:
            self.log_driver = spec['TaskDefaults']['LogDriver']

    def update_parameters(self, client):
        """Compute ``self.spec`` from the spec-related parameters.

        Options that are unsupported by the installed Docker SDK / daemon
        API (per ``client.option_minimal_versions``) or left unset are
        omitted from the spec.
        """
        # Mapping of create_swarm_spec() keyword -> attribute name.
        assign = dict(
            snapshot_interval='snapshot_interval',
            task_history_retention_limit='task_history_retention_limit',
            keep_old_snapshots='keep_old_snapshots',
            log_entries_for_slow_followers='log_entries_for_slow_followers',
            heartbeat_tick='heartbeat_tick',
            election_tick='election_tick',
            dispatcher_heartbeat_period='dispatcher_heartbeat_period',
            node_cert_expiry='node_cert_expiry',
            name='name',
            labels='labels',
            signing_ca_cert='signing_ca_cert',
            signing_ca_key='signing_ca_key',
            ca_force_rotate='ca_force_rotate',
            autolock_managers='autolock_managers',
            log_driver='log_driver',
        )
        params = dict()
        for dest, source in assign.items():
            if not client.option_minimal_versions[source]['supported']:
                continue
            value = getattr(self, source)
            if value is not None:
                params[dest] = value
        self.spec = client.create_swarm_spec(**params)

    def compare_to_active(self, other, client, differences):
        """Record differences between these parameters and ``other``.

        ``other`` holds the currently active swarm's values. Node-level
        and action-only options are skipped, as are options unsupported
        by the installed SDK/API or left unset. A requested token
        rotation always counts as a difference.
        """
        for k in self.__dict__:
            # Skip everything that is not part of the swarm spec proper.
            if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
                     'rotate_worker_token', 'rotate_manager_token', 'spec',
                     'default_addr_pool', 'subnet_size', 'data_path_addr',
                     'data_path_port'):
                continue
            if not client.option_minimal_versions[k]['supported']:
                continue
            value = getattr(self, k)
            if value is None:
                continue
            other_value = getattr(other, k)
            if value != other_value:
                differences.add(k, parameter=value, active=other_value)
        if self.rotate_worker_token:
            differences.add('rotate_worker_token', parameter=True, active=False)
        if self.rotate_manager_token:
            differences.add('rotate_manager_token', parameter=True, active=False)
        return differences
class SwarmManager(DockerBaseClass):
    """Performs the swarm operation selected by the ``state`` parameter.

    Instances are callable: calling one dispatches to the matching
    handler (init/join/leave/remove) and records the outcome in
    ``self.results`` for ``exit_json``.
    """

    def __init__(self, client, results):
        super(SwarmManager, self).__init__()
        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode
        self.swarm_info = {}
        self.state = client.module.params['state']
        self.force = client.module.params['force']
        self.node_id = client.module.params['node_id']
        self.differences = DifferenceTracker()
        self.parameters = TaskParameters.from_ansible_params(client)
        # Set to True by init_swarm() once a new cluster was created.
        self.created = False

    def __call__(self):
        """Run the handler for ``self.state``; attach diff data if requested."""
        choice_map = {
            "present": self.init_swarm,
            "join": self.join,
            "absent": self.leave,
            "remove": self.remove,
        }
        choice_map.get(self.state)()
        # NOTE(review): 'debug' is not set in TaskParameters.__init__; it is
        # presumably provided by DockerBaseClass -- confirm.
        if self.client.module._diff or self.parameters.debug:
            diff = dict()
            diff['before'], diff['after'] = self.differences.get_before_after()
            self.results['diff'] = diff

    def inspect_swarm(self):
        """Refresh ``self.swarm_info`` (incl. unlock key) from the daemon.

        API errors are swallowed so callers can continue without facts.
        """
        try:
            data = self.client.inspect_swarm()
            # Round-trip through JSON to obtain a plain, serializable copy.
            json_str = json.dumps(data, ensure_ascii=False)
            self.swarm_info = json.loads(json_str)
            self.results['changed'] = False
            self.results['swarm_facts'] = self.swarm_info
            unlock_key = self.get_unlock_key()
            self.swarm_info.update(unlock_key)
        except APIError:
            return

    def get_unlock_key(self):
        """Return ``{'UnlockKey': ...}``; the value is None when the key is
        unavailable or autolock did not just get created/changed."""
        default = {'UnlockKey': None}
        if not self.has_swarm_lock_changed():
            return default
        try:
            return self.client.get_unlock_key() or default
        except APIError:
            return default

    def has_swarm_lock_changed(self):
        """Whether autolock is enabled and was just created or toggled."""
        return self.parameters.autolock_managers and (
            self.created or self.differences.has_difference_for('autolock_managers')
        )

    def init_swarm(self):
        """Create a new swarm cluster, or update an existing one.

        Without ``force``, an existing manager node is only updated.
        """
        if not self.force and self.client.check_if_swarm_manager():
            self.__update_swarm()
            return
        if not self.check_mode:
            init_arguments = {
                'advertise_addr': self.parameters.advertise_addr,
                'listen_addr': self.parameters.listen_addr,
                'force_new_cluster': self.force,
                'swarm_spec': self.parameters.spec,
            }
            # Version-gated options are only passed when explicitly set.
            if self.parameters.default_addr_pool is not None:
                init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
            if self.parameters.subnet_size is not None:
                init_arguments['subnet_size'] = self.parameters.subnet_size
            if self.parameters.data_path_addr is not None:
                init_arguments['data_path_addr'] = self.parameters.data_path_addr
            if self.parameters.data_path_port is not None:
                init_arguments['data_path_port'] = self.parameters.data_path_port
            try:
                self.client.init_swarm(**init_arguments)
            except APIError as exc:
                self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
        if not self.client.check_if_swarm_manager():
            # In check mode nothing was actually created, so only fail for real runs.
            if not self.check_mode:
                self.client.fail("Swarm not created or other error!")
        self.created = True
        self.inspect_swarm()
        self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
        self.differences.add('state', parameter='present', active='absent')
        self.results['changed'] = True
        self.results['swarm_facts'] = {
            'JoinTokens': self.swarm_info.get('JoinTokens'),
            'UnlockKey': self.swarm_info.get('UnlockKey')
        }

    def __update_swarm(self):
        """Compare desired parameters to the active swarm and update it."""
        try:
            self.inspect_swarm()
            version = self.swarm_info['Version']['Index']
            # Merge active values into unset parameters, then diff against a
            # baseline built purely from the active swarm state.
            self.parameters.update_from_swarm_info(self.swarm_info)
            old_parameters = TaskParameters()
            old_parameters.update_from_swarm_info(self.swarm_info)
            self.parameters.compare_to_active(old_parameters, self.client, self.differences)
            if self.differences.empty:
                self.results['actions'].append("No modification")
                self.results['changed'] = False
                return
            # Rebuild the spec from the raw module parameters only, so values
            # merged in from the active swarm above are not part of it.
            update_parameters = TaskParameters.from_ansible_params(self.client)
            update_parameters.update_parameters(self.client)
            if not self.check_mode:
                self.client.update_swarm(
                    version=version, swarm_spec=update_parameters.spec,
                    rotate_worker_token=self.parameters.rotate_worker_token,
                    rotate_manager_token=self.parameters.rotate_manager_token)
        except APIError as exc:
            self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
            return
        self.inspect_swarm()
        self.results['actions'].append("Swarm cluster updated")
        self.results['changed'] = True

    def join(self):
        """Join this node to an existing swarm (no-op if already a member)."""
        if self.client.check_if_swarm_node():
            self.results['actions'].append("This node is already part of a swarm.")
            return
        if not self.check_mode:
            join_arguments = {
                'remote_addrs': self.parameters.remote_addrs,
                'join_token': self.parameters.join_token,
                'listen_addr': self.parameters.listen_addr,
                'advertise_addr': self.parameters.advertise_addr,
            }
            if self.parameters.data_path_addr is not None:
                join_arguments['data_path_addr'] = self.parameters.data_path_addr
            try:
                self.client.join_swarm(**join_arguments)
            except APIError as exc:
                self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("New node is added to swarm cluster")
        self.differences.add('joined', parameter=True, active=False)
        self.results['changed'] = True

    def leave(self):
        """Make this node leave the swarm (no-op if not a member)."""
        if not self.client.check_if_swarm_node():
            self.results['actions'].append("This node is not part of a swarm.")
            return
        if not self.check_mode:
            try:
                self.client.leave_swarm(force=self.force)
            except APIError as exc:
                self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node has left the swarm cluster")
        self.differences.add('joined', parameter='absent', active='present')
        self.results['changed'] = True

    def remove(self):
        """Remove node ``self.node_id`` from the cluster (manager only).

        The node's status is checked (with retries) and must be down
        before removal is attempted.
        """
        if not self.client.check_if_swarm_manager():
            self.client.fail("This node is not a manager.")
        try:
            status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
        except APIError:
            return
        if not status_down:
            self.client.fail("Can not remove the node. The status node is ready and not down.")
        if not self.check_mode:
            try:
                self.client.remove_node(node_id=self.node_id, force=self.force)
            except APIError as exc:
                self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
        self.results['actions'].append("Node is removed from swarm cluster.")
        self.differences.add('joined', parameter=False, active=True)
        self.results['changed'] = True
def _detect_remove_operation(client):
    """Return whether the module was invoked with ``state=remove``.

    Used as a usage detector for the version-gated remove operation in
    ``option_minimal_versions``.
    """
    state = client.module.params['state']
    return state == 'remove'
def main():
    """Entry point for the docker_swarm module.

    Builds the argument spec, constructs the swarm client, delegates the
    work to SwarmManager and exits with the collected results.
    """
    argument_spec = dict(
        advertise_addr=dict(type='str'),
        data_path_addr=dict(type='str'),
        data_path_port=dict(type='int'),
        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']),
        force=dict(type='bool', default=False),
        listen_addr=dict(type='str', default='0.0.0.0:2377'),
        remote_addrs=dict(type='list', elements='str'),
        join_token=dict(type='str', no_log=True),
        snapshot_interval=dict(type='int'),
        task_history_retention_limit=dict(type='int'),
        keep_old_snapshots=dict(type='int'),
        log_entries_for_slow_followers=dict(type='int'),
        heartbeat_tick=dict(type='int'),
        election_tick=dict(type='int'),
        dispatcher_heartbeat_period=dict(type='int'),
        node_cert_expiry=dict(type='int'),
        name=dict(type='str'),
        labels=dict(type='dict'),
        signing_ca_cert=dict(type='str'),
        signing_ca_key=dict(type='str', no_log=True),
        ca_force_rotate=dict(type='int'),
        autolock_managers=dict(type='bool'),
        node_id=dict(type='str'),
        rotate_worker_token=dict(type='bool', default=False),
        rotate_manager_token=dict(type='bool', default=False),
        default_addr_pool=dict(type='list', elements='str'),
        subnet_size=dict(type='int'),
    )
    # Parameters that become mandatory for specific states.
    required_if = [
        ('state', 'join', ['remote_addrs', 'join_token']),
        ('state', 'remove', ['node_id'])
    ]
    # Minimum Docker SDK for Python / Docker API version per option; the
    # client rejects options the installed versions do not support.
    option_minimal_versions = dict(
        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        autolock_managers=dict(docker_py_version='2.6.0'),
        log_driver=dict(docker_py_version='2.6.0'),
        # Not an option of its own: state=remove is detected via callback.
        remove_operation=dict(
            docker_py_version='2.4.0',
            detect_usage=_detect_remove_operation,
            usage_msg='remove swarm nodes'
        ),
        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        data_path_addr=dict(docker_py_version='4.0.0', docker_api_version='1.30'),
        data_path_port=dict(docker_py_version='6.0.0', docker_api_version='1.40'),
    )
    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_version='1.10.0',
        option_minimal_versions=option_minimal_versions,
    )
    # Validate the labels dict before any API call is made.
    sanitize_labels(client.module.params['labels'], 'labels', client)
    try:
        results = dict(
            changed=False,
            result='',
            actions=[]
        )
        # SwarmManager is callable; the call performs the requested action.
        SwarmManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,379 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_swarm_info
short_description: Retrieves facts about Docker Swarm cluster
description:
- Retrieves facts about a Docker Swarm.
- Returns lists of swarm objects names for the services - nodes, services, tasks.
- The output differs depending on API version available on docker host.
- Must be run on a Swarm Manager node; otherwise the module fails with an error message. It does return boolean flags on both
error and success which indicate whether the docker daemon can be communicated with, whether it is in Swarm mode, and
whether it is a Swarm Manager node.
author:
- Piotr Wojciechowski (@WojciechowskiPiotr)
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
nodes:
description:
- Whether to list swarm nodes.
type: bool
default: false
nodes_filters:
description:
- A dictionary of filter values used for selecting nodes to list.
- 'For example, C(name: mynode).'
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering) for more information
on possible filters.
type: dict
services:
description:
- Whether to list swarm services.
type: bool
default: false
services_filters:
description:
- A dictionary of filter values used for selecting services to list.
- 'For example, C(name: myservice).'
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering) for more
information on possible filters.
type: dict
tasks:
description:
- Whether to list containers.
type: bool
default: false
tasks_filters:
description:
- A dictionary of filter values used for selecting tasks to list.
- 'For example, C(node: mynode-1).'
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering) for more
information on possible filters.
type: dict
unlock_key:
description:
- Whether to retrieve the swarm unlock key.
type: bool
default: false
verbose_output:
description:
- When set to V(true) and O(nodes), O(services), or O(tasks) is set to V(true), then the module output will contain
verbose information about objects matching the full output of API method.
- For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
- The verbose output in this module contains only subset of information returned by this info module for each type of
the objects.
type: bool
default: false
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get info on Docker Swarm
community.docker.docker_swarm_info:
ignore_errors: true
register: result
- name: Inform about basic flags
ansible.builtin.debug:
msg: |
Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
Docker in Swarm mode: {{ result.docker_swarm_active }}
This is a Manager node: {{ result.docker_swarm_manager }}
- name: Get info on Docker Swarm and list of registered nodes
community.docker.docker_swarm_info:
nodes: true
register: result
- name: Get info on Docker Swarm and extended list of registered nodes
community.docker.docker_swarm_info:
nodes: true
verbose_output: true
register: result
- name: Get info on Docker Swarm and filtered list of registered nodes
community.docker.docker_swarm_info:
nodes: true
nodes_filters:
name: mynode
register: result
- name: Show swarm facts
ansible.builtin.debug:
var: result.swarm_facts
- name: Get the swarm unlock key
community.docker.docker_swarm_info:
unlock_key: true
register: result
- name: Print swarm unlock key
ansible.builtin.debug:
var: result.swarm_unlock_key
"""
RETURN = r"""
can_talk_to_docker:
description:
- Will be V(true) if the module can talk to the docker daemon.
returned: both on success and on error
type: bool
docker_swarm_active:
description:
- Will be V(true) if the module can talk to the docker daemon, and the docker daemon is in Swarm mode.
returned: both on success and on error
type: bool
docker_swarm_manager:
description:
- Will be V(true) if the module can talk to the docker daemon, the docker daemon is in Swarm mode, and the current node
is a manager node.
- Only if this one is V(true), the module will not fail.
returned: both on success and on error
type: bool
swarm_facts:
description:
- Facts representing the basic state of the docker Swarm cluster.
- Contains tokens to connect to the Swarm.
returned: always
type: dict
swarm_unlock_key:
description:
- Contains the key needed to unlock the swarm.
returned: When O(unlock_key=true).
type: str
nodes:
description:
- List of dict objects containing the basic information about each node. Keys match the C(docker node ls) output unless
O(verbose_output=true). See description for O(verbose_output).
returned: When O(nodes=true)
type: list
elements: dict
services:
description:
- List of dict objects containing the basic information about each service. Keys match the C(docker service ls) output
unless O(verbose_output=true). See description for O(verbose_output).
returned: When O(services=true)
type: list
elements: dict
tasks:
description:
- List of dict objects containing the basic information about each task. Keys match the C(docker service ps) output
unless O(verbose_output=true). See description for O(verbose_output).
returned: When O(tasks=true)
type: list
elements: dict
"""
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker_common
pass
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
from ansible_collections.community.docker.plugins.module_utils.common import RequestException
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
clean_dict_booleans_for_docker_api,
)
class DockerSwarmManager(DockerBaseClass):
    """Collects the requested swarm information into ``results``.

    All work happens in ``__init__``: swarm facts are always gathered;
    node/service/task lists and the unlock key only when the matching
    module options are set.
    """

    def __init__(self, client, results):
        super(DockerSwarmManager, self).__init__()
        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params['verbose_output']
        # Object types that can be listed via same-named module options.
        listed_objects = ['tasks', 'services', 'nodes']
        # Everything below requires a swarm manager; fail early otherwise.
        self.client.fail_task_if_not_swarm_manager()
        self.results['swarm_facts'] = self.get_docker_swarm_facts()
        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                # Filters arrive via the matching '<object>_filters' option.
                filter_name = docker_object + "_filters"
                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
        if self.client.module.params['unlock_key']:
            self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()

    def get_docker_swarm_facts(self):
        """Return raw swarm inspection data; fail the task on API errors."""
        try:
            return self.client.inspect_swarm()
        except APIError as exc:
            self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))

    def get_docker_items_list(self, docker_object=None, filters=None):
        """List swarm objects of the given type.

        Returns the raw API objects when ``verbose_output`` is set,
        otherwise a reduced per-item dict of essential facts.
        """
        items = None
        items_list = []
        try:
            if docker_object == 'nodes':
                items = self.client.nodes(filters=filters)
            elif docker_object == 'tasks':
                items = self.client.tasks(filters=filters)
            elif docker_object == 'services':
                items = self.client.services(filters=filters)
        except APIError as exc:
            self.client.fail("Error inspecting docker swarm for object '%s': %s" %
                             (docker_object, to_native(exc)))
        if self.verbose_output:
            return items
        for item in items:
            item_record = dict()
            if docker_object == 'nodes':
                item_record = self.get_essential_facts_nodes(item)
            elif docker_object == 'tasks':
                item_record = self.get_essential_facts_tasks(item)
            elif docker_object == 'services':
                item_record = self.get_essential_facts_services(item)
            # NOTE(review): only services set 'Mode'; for global-mode services
            # this reports the number of listed services as the replica count.
            # Verify this is the intended semantics.
            if item_record.get('Mode') == 'Global':
                item_record['Replicas'] = len(items)
            items_list.append(item_record)
        return items_list

    @staticmethod
    def get_essential_facts_nodes(item):
        """Reduce a raw node object to the C(docker node ls)-like essentials."""
        object_essentials = dict()
        object_essentials['ID'] = item.get('ID')
        object_essentials['Hostname'] = item['Description']['Hostname']
        object_essentials['Status'] = item['Status']['State']
        object_essentials['Availability'] = item['Spec']['Availability']
        if 'ManagerStatus' in item:
            object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
            # The reachable leader is reported as "Leader" instead.
            if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
                object_essentials['ManagerStatus'] = "Leader"
        else:
            object_essentials['ManagerStatus'] = None
        object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
        return object_essentials

    def get_essential_facts_tasks(self, item):
        """Reduce a raw task object to the C(docker service ps)-like essentials."""
        object_essentials = dict()
        object_essentials['ID'] = item['ID']
        # Returning container ID to not trigger another connection to host
        # Container ID is sufficient to get extended info in other tasks
        object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
        object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
        object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
        object_essentials['DesiredState'] = item['DesiredState']
        object_essentials['CurrentState'] = item['Status']['State']
        if 'Err' in item['Status']:
            object_essentials['Error'] = item['Status']['Err']
        else:
            object_essentials['Error'] = None
        return object_essentials

    @staticmethod
    def get_essential_facts_services(item):
        """Reduce a raw service object to the C(docker service ls)-like essentials."""
        object_essentials = dict()
        object_essentials['ID'] = item['ID']
        object_essentials['Name'] = item['Spec']['Name']
        if 'Replicated' in item['Spec']['Mode']:
            object_essentials['Mode'] = "Replicated"
            object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
        elif 'Global' in item['Spec']['Mode']:
            object_essentials['Mode'] = "Global"
            # Number of replicas have to be updated in calling method or may be left as None
            object_essentials['Replicas'] = None
        object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
        if item['Spec'].get('EndpointSpec') and 'Ports' in item['Spec']['EndpointSpec']:
            object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
        else:
            object_essentials['Ports'] = []
        return object_essentials

    def get_docker_swarm_unlock_key(self):
        """Return the swarm unlock key, or None when not available."""
        unlock_key = self.client.get_unlock_key() or {}
        return unlock_key.get('UnlockKey') or None
def main():
    """Entry point for the docker_swarm_info module."""
    argument_spec = dict(
        nodes=dict(type='bool', default=False),
        nodes_filters=dict(type='dict'),
        tasks=dict(type='bool', default=False),
        tasks_filters=dict(type='dict'),
        services=dict(type='bool', default=False),
        services_filters=dict(type='dict'),
        unlock_key=dict(type='bool', default=False),
        verbose_output=dict(type='bool', default=False),
    )
    # Retrieving the unlock key needs a newer Docker SDK for Python.
    option_minimal_versions = dict(
        unlock_key=dict(docker_py_version='2.7.0'),
    )
    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='1.10.0',
        option_minimal_versions=option_minimal_versions,
        # Flags reported in the module output even on failure.
        fail_results=dict(
            can_talk_to_docker=False,
            docker_swarm_active=False,
            docker_swarm_manager=False,
        ),
    )
    # Client creation succeeded, so the daemon is reachable; also record
    # whether this host is a swarm node and a swarm manager.
    client.fail_results['can_talk_to_docker'] = True
    client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
    client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
    try:
        results = dict(
            changed=False,
        )
        # All gathering happens inside DockerSwarmManager.__init__.
        DockerSwarmManager(client, results)
        results.update(client.fail_results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,120 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_swarm_service_info
short_description: Retrieves information about docker services from a Swarm Manager
description:
- Retrieves information about a docker service.
- Essentially returns the output of C(docker service inspect <name>).
- Must be executed on a host running as Swarm Manager, otherwise the module will fail.
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_1_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- The name of the service to inspect.
type: str
required: true
author:
- Hannes Ljungberg (@hannseman)
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get info from a service
community.docker.docker_swarm_service_info:
name: myservice
register: result
"""
RETURN = r"""
exists:
description:
- Returns whether the service exists.
type: bool
returned: always
sample: true
service:
description:
- A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
- Will be V(none) if service does not exist.
returned: always
type: dict
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
try:
from docker.errors import DockerException
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils.common import (
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
def get_service_info(client):
    """Inspect the service named by the module's ``name`` parameter.

    Returns the inspection result; with ``skip_missing=True`` a missing
    service does not raise but yields the client's missing-service value.
    """
    service_name = client.module.params['name']
    return client.get_service_inspect(service_id=service_name, skip_missing=True)
def main():
    """Entry point for the docker_swarm_service_info module."""
    argument_spec = dict(
        name=dict(type='str', required=True),
    )
    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.0.0',
    )
    # Inspecting services only works on a swarm manager node.
    client.fail_task_if_not_swarm_manager()
    try:
        service = get_service_info(client)
        client.module.exit_json(
            changed=False,
            service=service,
            exists=bool(service)
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,316 @@
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_volume
short_description: Manage Docker volumes
description:
- Create/remove Docker volumes.
- Performs largely the same function as the C(docker volume) CLI subcommand.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
attributes:
check_mode:
support: full
diff_mode:
support: full
idempotent:
support: partial
details:
- If O(recreate=always) the module is not idempotent.
options:
volume_name:
description:
- Name of the volume to operate on.
type: str
required: true
aliases:
- name
driver:
description:
- Specify the type of volume. Docker provides the V(local) driver, but 3rd party drivers can also be used.
type: str
default: local
driver_options:
description:
- 'Dictionary of volume settings. Consult the Docker documentation for valid options and values:
U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options).'
type: dict
default: {}
labels:
description:
- Dictionary of label key/values to set for the volume.
type: dict
recreate:
description:
- Controls when a volume will be recreated when O(state=present). Please note that recreating an existing volume will
cause B(any data in the existing volume to be lost!) The volume will be deleted and a new volume with the same name
will be created.
- The value V(always) forces the volume to be always recreated.
- The value V(never) makes sure the volume will not be recreated.
- The value V(options-changed) makes sure the volume will be recreated if the volume already exists and the driver, driver
options or labels differ.
type: str
default: never
choices:
- always
- never
- options-changed
state:
description:
- V(absent) deletes the volume.
- V(present) creates the volume, if it does not already exist.
type: str
default: present
choices:
- absent
- present
author:
- Alex Grönholm (@agronholm)
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Create a volume
community.docker.docker_volume:
name: volume_one
- name: Remove a volume
community.docker.docker_volume:
name: volume_one
state: absent
- name: Create a volume with options
community.docker.docker_volume:
name: volume_two
driver_options:
type: btrfs
device: /dev/sda2
"""
RETURN = r"""
volume:
description:
- Volume inspection results for the affected volume.
returned: success
type: dict
sample: {}
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import iteritems
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DockerBaseClass,
DifferenceTracker,
sanitize_labels,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
APIError,
DockerException,
)
class TaskParameters(DockerBaseClass):
    """Parameter holder for docker_volume.

    Every module parameter is mirrored as an attribute on this object.
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client
        # Declare the recognized options up front (all default to None);
        # the loop below then fills them from the module parameters.
        for attribute in ('volume_name', 'driver', 'driver_options',
                          'labels', 'recreate', 'debug'):
            setattr(self, attribute, None)
        for option, option_value in iteritems(client.module.params):
            setattr(self, option, option_value)
class DockerVolumeManager(object):
    """Drives a single docker_volume task.

    Constructing an instance immediately performs the requested action
    (create/recreate for O(state=present), delete for O(state=absent)) and
    records the outcome in ``self.results`` for the caller to pass to
    ``exit_json``.
    """

    def __init__(self, client):
        # client: AnsibleDockerClient giving access to both the parsed module
        # arguments and the Docker daemon HTTP API.
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        # Base result structure handed back to Ansible.
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()
        self.existing_volume = self.get_existing_volume()
        # Dispatch on the requested state; both handlers mutate self.results
        # in place.
        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()
        # Expose diff details when diff mode, check mode or debug is active.
        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff_result

    def get_existing_volume(self):
        """Return the daemon's data for the named volume, or None if absent."""
        try:
            volumes = self.client.get_json('/volumes')
        except APIError as e:
            self.client.fail(to_native(e))
        # NOTE(review): assumes client.fail() does not return (fail_json
        # exits), so `volumes` is always bound here — confirm against
        # AnsibleDockerClient.
        if volumes[u'Volumes'] is None:
            return None
        # Linear scan; no server-side name filter is used on this endpoint.
        for volume in volumes[u'Volumes']:
            if volume['Name'] == self.parameters.volume_name:
                return volume
        return None

    def has_different_config(self):
        """
        Compare the requested parameters against the existing volume.

        :return: DifferenceTracker of options (driver, driver_options.*,
            labels.*) whose requested value differs from the active one
        """
        differences = DifferenceTracker()
        if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
            differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
        if self.parameters.driver_options:
            if not self.existing_volume.get('Options'):
                # Volume currently has no options at all.
                differences.add('driver_options',
                                parameter=self.parameters.driver_options,
                                active=self.existing_volume.get('Options'))
            else:
                # NOTE(review): a falsy existing value (e.g. empty string) is
                # treated as "differs" because of the first clause below.
                for key, value in iteritems(self.parameters.driver_options):
                    if (not self.existing_volume['Options'].get(key) or
                            value != self.existing_volume['Options'][key]):
                        differences.add('driver_options.%s' % key,
                                        parameter=value,
                                        active=self.existing_volume['Options'].get(key))
        if self.parameters.labels:
            existing_labels = self.existing_volume.get('Labels') or {}
            for label in self.parameters.labels:
                if existing_labels.get(label) != self.parameters.labels.get(label):
                    differences.add('labels.%s' % label,
                                    parameter=self.parameters.labels.get(label),
                                    active=existing_labels.get(label))
        return differences

    def create_volume(self):
        """Create the volume if it does not exist yet; record the action."""
        if not self.existing_volume:
            if not self.check_mode:
                try:
                    data = {
                        'Name': self.parameters.volume_name,
                        'Driver': self.parameters.driver,
                        'DriverOpts': self.parameters.driver_options,
                    }
                    if self.parameters.labels is not None:
                        data['Labels'] = self.parameters.labels
                    resp = self.client.post_json_to_json('/volumes/create', data=data)
                    # Re-inspect so existing_volume reflects the daemon's view.
                    self.existing_volume = self.client.get_json('/volumes/{0}', resp['Name'])
                except APIError as e:
                    self.client.fail(to_native(e))
            # In check mode the action is recorded without touching the daemon.
            self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
            self.results['changed'] = True

    def remove_volume(self):
        """Delete the volume if it exists; record the action."""
        if self.existing_volume:
            if not self.check_mode:
                try:
                    self.client.delete_call('/volumes/{0}', self.parameters.volume_name)
                except APIError as e:
                    self.client.fail(to_native(e))
            self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
            self.results['changed'] = True

    def absent(self):
        """Ensure the volume does not exist."""
        self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
        self.remove_volume()

    def present(self):
        """Ensure the volume exists, honouring the O(recreate) policy."""
        differences = DifferenceTracker()
        if self.existing_volume:
            differences = self.has_different_config()
        self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
        # Recreate when options changed ('options-changed') or always ('always').
        if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
            self.remove_volume()
            self.existing_volume = None
        self.create_volume()
        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result['differences'] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)
        if not self.check_mode and not self.parameters.debug:
            # 'actions' is internal detail; only exposed in check/debug mode.
            self.results.pop('actions')
        volume_facts = self.get_existing_volume()
        self.results['volume'] = volume_facts
def main():
    """Module entry point: create or remove a Docker volume and exit with the results."""
    # Accepted options; must stay in sync with the DOCUMENTATION block above.
    argument_spec = {
        'volume_name': {'type': 'str', 'required': True, 'aliases': ['name']},
        'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
        'driver': {'type': 'str', 'default': 'local'},
        'driver_options': {'type': 'dict', 'default': {}},
        'labels': {'type': 'dict'},
        'recreate': {'type': 'str', 'default': 'never', 'choices': ['always', 'never', 'options-changed']},
        'debug': {'type': 'bool', 'default': False},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # "The docker server >= 1.9.0"
    )

    # Reject label keys/values the daemon cannot store before doing any work.
    sanitize_labels(client.module.params['labels'], 'labels', client)

    try:
        volume_manager = DockerVolumeManager(client)
        client.module.exit_json(**volume_manager.results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()

# ---------------------------------------------------------------------------
# New file added in this commit (originally "+120 lines" in the diff view):
# plugins/modules/docker_volume_info.py
# ---------------------------------------------------------------------------
#!/usr/bin/python
# coding: utf-8
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: docker_volume_info
short_description: Retrieve facts about Docker volumes
description:
- Performs largely the same function as the C(docker volume inspect) CLI subcommand.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.attributes
- community.docker.attributes.actiongroup_docker
- community.docker.attributes.info_module
- community.docker.attributes.idempotent_not_modify_state
options:
name:
description:
- Name of the volume to inspect.
type: str
required: true
aliases:
- volume_name
author:
- Felix Fontein (@felixfontein)
requirements:
- "Docker API >= 1.25"
"""
EXAMPLES = r"""
- name: Get infos on volume
community.docker.docker_volume_info:
name: mydata
register: result
- name: Does volume exist?
ansible.builtin.debug:
msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
- name: Print information about volume
ansible.builtin.debug:
var: result.volume
when: result.exists
"""
RETURN = r"""
exists:
description:
- Returns whether the volume exists.
type: bool
returned: always
sample: true
volume:
description:
- Volume inspection results for the affected volume.
- Will be V(none) if volume does not exist.
returned: success
type: dict
sample: '{ "CreatedAt": "2018-12-09T17:43:44+01:00", "Driver": "local", "Labels": null, "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
"Name": "ansible-test-bd3f6172", "Options": {}, "Scope": "local" }'
"""
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common_api import (
AnsibleDockerClient,
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
def get_existing_volume(client, volume_name):
    """Inspect the named volume via the daemon API.

    Returns the volume's inspect data as a dict, or None when the volume
    does not exist. Any other API failure is reported through client.fail().
    """
    try:
        inspect_data = client.get_json('/volumes/{0}', volume_name)
    except NotFound as dummy:
        # A missing volume is an expected outcome, not an error.
        return None
    except Exception as exc:
        client.fail("Error inspecting volume: %s" % to_native(exc))
    else:
        return inspect_data
def main():
    """Module entry point: look up one volume by name and exit with the facts.

    Always reports changed=False (info module); result keys are
    'exists' (bool) and 'volume' (dict or None).
    """
    # Accepted options; must stay in sync with the DOCUMENTATION block above.
    argument_spec = dict(
        name=dict(type='str', required=True, aliases=['volume_name']),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        volume = get_existing_volume(client, client.module.params['name'])

        client.module.exit_json(
            changed=False,
            # bool(volume) is the idiomatic form of `True if volume else False`.
            exists=bool(volume),
            volume=volume,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()