add docker examples

This commit is contained in:
parent 453233a25b
commit f135dcc118
@@ -42,14 +42,11 @@ jobs:
      matrix:
        distro:
          - rockylinux9
          - rockylinux8
          - ubuntu2404
          - ubuntu2204
          - ubuntu2004
          - debian12
          - debian11
          - debian10
          - fedora39
          - fedora40

    steps:
      - name: Check out the codebase.
@@ -1,6 +1,6 @@
# Ansible Role: Docker

-[](https://github.com/geerlingguy/ansible-role-docker/actions?query=workflow%3ACI)
+[](https://github.com/geerlingguy/ansible-role-docker/actions/workflows/ci.yml)

An Ansible Role that installs [Docker](https://www.docker.com) on Linux.
@@ -34,11 +34,19 @@ docker_obsolete_packages:
  - docker
  - docker.io
  - docker-engine
  - docker-doc
  - docker-compose
  - docker-compose-v2
  - podman-docker
  - containerd
  - runc
```

`docker_obsolete_packages` for each OS family:

- [`RedHat.yml`](./vars/RedHat.yml)
- [`Debian.yml`](./vars/Debian.yml)

A list of packages to be uninstalled prior to running this role. See [Docker's installation instructions](https://docs.docker.com/engine/install/debian/#uninstall-old-versions) for an up-to-date list of old packages that should be removed.
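For instance, if one of these packages must be kept on a host, the list can be overridden at the play level; a minimal sketch (the group name and the trimmed list are illustrative):

```yaml
# Hypothetical override: keep docker-compose-v2 installed by removing it
# from the obsolete-package list for this play only.
- hosts: docker_hosts
  vars:
    docker_obsolete_packages:
      - docker
      - docker.io
      - docker-engine
      - docker-doc
      - podman-docker
      - containerd
      - runc
  roles:
    - geerlingguy.docker
```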
```yaml
@@ -51,7 +59,7 @@ docker_restart_handler_state: restarted
Variables to control the state of the `docker` service, and whether it should start on boot. If you're installing Docker inside a Docker container without systemd or sysvinit, you should set `docker_service_manage` to `false`.
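For example, a play that provisions a container image (where no init system is available) might disable service management entirely; a minimal sketch, with an illustrative host pattern:

```yaml
# Hypothetical play for a systemd-less build environment: install Docker
# but do not try to start or enable the docker service.
- hosts: all
  vars:
    docker_service_manage: false
  roles:
    - geerlingguy.docker
```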
```yaml
-docker_install_compose_plugin: false
+docker_install_compose_plugin: true
docker_compose_package: docker-compose-plugin
docker_compose_package_state: present
```
@@ -59,9 +67,10 @@ docker_compose_package_state: present
Docker Compose Plugin installation options. These differ from the below in that docker-compose is installed as a docker plugin (and used with `docker compose`) instead of a standalone binary.
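To install Compose only as the plugin and skip the standalone binary described below, both toggles can be set together; a minimal sketch of a group_vars entry:

```yaml
# Hypothetical group_vars: prefer the `docker compose` plugin and skip
# downloading the standalone docker-compose binary.
docker_install_compose_plugin: true
docker_install_compose: false
```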
```yaml
-docker_install_compose: true
-docker_compose_version: "1.26.0"
+docker_install_compose: false
+docker_compose_version: "v2.32.1"
docker_compose_arch: "{{ ansible_architecture }}"
docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}"
docker_compose_path: /usr/local/bin/docker-compose
```
@@ -82,7 +91,7 @@ The main Docker repo URL, common between Debian and RHEL systems.
```yaml
docker_apt_release_channel: stable
docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
-docker_apt_repository: "deb [arch={{ docker_apt_arch }}] {{ docker_repo_url }}/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
+docker_apt_repository: "deb [arch={{ docker_apt_arch }}{{' signed-by=/etc/apt/keyrings/docker.asc' if add_repository_key is not failed}}] {{ docker_repo_url }}/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
docker_apt_ignore_key_error: True
docker_apt_gpg_key: "{{ docker_repo_url }}/{{ ansible_distribution | lower }}/gpg"
docker_apt_filename: "docker"
@@ -115,7 +124,7 @@ A list of system users to be added to the `docker` group (so they can use Docker

```yaml
docker_daemon_options:
-  storage-driver: "devicemapper"
+  storage-driver: "overlay2"
  log-opts:
    max-size: "100m"
```
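Taken together, a host-level override of the user list and daemon options might look like the following sketch (the user name is illustrative):

```yaml
# Hypothetical host_vars: let the deploy user run docker without sudo,
# and cap container log size via daemon options.
docker_users:
  - deploy
docker_daemon_options:
  storage-driver: "overlay2"
  log-opts:
    max-size: "100m"
```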
@@ -12,6 +12,9 @@ docker_obsolete_packages:
  - docker
  - docker.io
  - docker-engine
  - docker-doc
  - docker-compose
  - docker-compose-v2
  - podman-docker
  - containerd
  - runc
@@ -29,7 +32,7 @@ docker_compose_package_state: present

# Docker Compose options.
docker_install_compose: false
-docker_compose_version: "v2.11.1"
+docker_compose_version: "v2.32.1"
docker_compose_arch: "{{ ansible_architecture }}"
docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}"
docker_compose_path: /usr/local/bin/docker-compose
@@ -1,2 +1,2 @@
-install_date: 'Wed 17 Jul 2024 02:03:32 PM '
-version: 7.3.0
+install_date: 'Fri 11 Apr 2025 09:32:24 AM '
+version: 7.4.7
@@ -22,6 +22,7 @@ galaxy_info:
        - bionic
        - focal
        - jammy
+       - noble
    - name: Alpine
      versions:
        - all
@@ -1,7 +1,7 @@
---
- name: Converge
  hosts: all
-  become: true
+  # become: true

  pre_tasks:
    - name: Update apt cache.
@@ -8,7 +8,7 @@ driver:
  name: docker
platforms:
  - name: instance
-   image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux8}-ansible:latest"
+   image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:rw

docker/roles/geerlingguy.docker/molecule/default/verify.yml  (new file, 51 lines)
@@ -0,0 +1,51 @@
---
- name: Verify Docker Role
  hosts: all
  tasks:
    - name: Verify Docker binary is available
      command: docker version
      register: docker_version_result
      changed_when: false
      failed_when: docker_version_result.rc != 0

    - name: Show Docker version details
      debug:
        msg: >
          Docker Version Output:
          {{ docker_version_result.stdout_lines | join('\n') }}

    - name: Verify Docker service is running
      command: systemctl is-active docker
      register: docker_service_status
      when: ansible_service_mgr == 'systemd'
      changed_when: false
      failed_when: docker_service_status.stdout.strip() != "active"

    - name: Display Docker service status
      debug:
        msg: "Docker service is {{ docker_service_status.stdout.strip() }}"
      when: ansible_service_mgr == 'systemd'

    - name: Pull the 'hello-world' image
      command: docker pull hello-world
      register: docker_pull_result
      changed_when: true
      failed_when: docker_pull_result.rc != 0

    - name: Show result of pulling the 'hello-world' image
      debug:
        msg: >
          Pulling 'hello-world' completed with output:
          {{ docker_pull_result.stdout_lines | join('\n') }}

    - name: Run a test container (hello-world)
      command: docker run --rm hello-world
      register: docker_run_result
      changed_when: true
      failed_when: docker_run_result.rc != 0

    - name: Display test container output
      debug:
        msg: >
          Running 'hello-world' container completed with output:
          {{ docker_run_result.stdout_lines | join('\n') }}
@@ -1,4 +1,22 @@
---
+- name: Ensure apt key is not present in trusted.gpg.d
+  ansible.builtin.file:
+    path: /etc/apt/trusted.gpg.d/docker.asc
+    state: absent
+
+- name: Ensure old apt source list is not present in /etc/apt/sources.list.d
+  ansible.builtin.file:
+    path: "/etc/apt/sources.list.d/download_docker_com_linux_{{ docker_apt_ansible_distribution | lower }}.list"
+    state: absent
+
+- name: Ensure the repo referencing the previous trusted.gpg.d key is not present
+  apt_repository:
+    repo: "deb [arch={{ docker_apt_arch }} signed-by=/etc/apt/trusted.gpg.d/docker.asc] {{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
+    state: absent
+    filename: "{{ docker_apt_filename }}"
+    update_cache: true
+  when: docker_add_repo | bool
+
- # See https://docs.docker.com/engine/install/debian/#uninstall-old-versions
  name: Ensure old versions of Docker are not installed.
  package:
@@ -17,13 +35,13 @@
  file:
    path: /etc/apt/keyrings
    state: directory
-    mode: '0755'
+    mode: "0755"

- name: Add Docker apt key.
  ansible.builtin.get_url:
    url: "{{ docker_apt_gpg_key }}"
    dest: /etc/apt/keyrings/docker.asc
-    mode: '0644'
+    mode: "0644"
    force: false
    checksum: "{{ docker_apt_gpg_key_checksum | default(omit) }}"
  register: add_repository_key
@@ -1,10 +1,7 @@
---
- name: Ensure old versions of Docker are not installed.
  package:
-    name:
-      - docker
-      - docker-common
-      - docker-engine
+    name: "{{ docker_obsolete_packages }}"
    state: absent

- name: Add Docker GPG key.
@@ -1,2 +1,3 @@
---
docker_packages: "docker"
+docker_compose_package: docker-cli-compose

docker/roles/geerlingguy.docker/vars/Debian.yml  (new file, 14 lines)
@@ -0,0 +1,14 @@
---
# Used only for Debian/Ubuntu (Debian OS-Family)
# https://docs.docker.com/engine/install/debian/#uninstall-old-versions

docker_obsolete_packages:
  - docker
  - docker.io
  - docker-engine
  - docker-doc
  - docker-compose
  - docker-compose-v2
  - podman-docker
  - containerd
  - runc

docker/roles/geerlingguy.docker/vars/RedHat.yml  (new file, 14 lines)
@@ -0,0 +1,14 @@
---
# Used only for Fedora/Rocky (RedHat OS-Family)
# https://docs.docker.com/engine/install/fedora/#uninstall-old-versions
# https://docs.docker.com/engine/install/centos/#uninstall-old-versions

docker_obsolete_packages:
  - docker
  - docker-client
  - docker-client-latest
  - docker-common
  - docker-latest
  - docker-latest-logrotate
  - docker-logrotate
  - docker-engine
@@ -0,0 +1,8 @@
download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/community-docker-4.5.2.tar.gz
format_version: 1.0.0
name: docker
namespace: community
server: https://galaxy.ansible.com/api/
signatures: []
version: 4.5.2
version_url: /api/v3/plugin/ansible/content/published/collections/index/community/docker/versions/4.5.2/
@@ -0,0 +1,8 @@
download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/community-library_inventory_filtering_v1-1.0.2.tar.gz
format_version: 1.0.0
name: library_inventory_filtering_v1
namespace: community
server: https://galaxy.ansible.com/api/
signatures: []
version: 1.0.2
version_url: /api/v3/plugin/ansible/content/published/collections/index/community/library_inventory_filtering_v1/versions/1.0.2/
@@ -0,0 +1,9 @@
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
@@ -0,0 +1,284 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
  - cron: 0 12 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-3

variables:
  - name: checkoutPath
    value: ansible_collections/community/docker
  - name: coverageBranches
    value: main
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:6.0.0

pool: Standard

stages:

### Sanity & units
  - stage: Ansible_devel
    displayName: Sanity & Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: 'devel/sanity/1'
            - name: Sanity Extra # Only on devel
              test: 'devel/sanity/extra'
            - name: Units
              test: 'devel/units/1'
  - stage: Ansible_2_18
    displayName: Sanity & Units 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.18/sanity/1'
            - name: Units
              test: '2.18/units/1'
  - stage: Ansible_2_17
    displayName: Sanity & Units 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.17/sanity/1'
            - name: Units
              test: '2.17/units/1'
  - stage: Ansible_2_16
    displayName: Sanity & Units 2.16
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.16/sanity/1'
            - name: Units
              test: '2.16/units/1'

### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: Fedora 41
              test: fedora41
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Ubuntu 24.04
              test: ubuntu2404
            - name: Alpine 3.21
              test: alpine321
          groups:
            - 4
            - 5
  - stage: Docker_2_18
    displayName: Docker 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.18/linux/{0}
          targets:
            - name: Fedora 40
              test: fedora40
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3.20
              test: alpine320
          groups:
            - 4
            - 5
  - stage: Docker_2_17
    displayName: Docker 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.17/linux/{0}
          targets:
            - name: Fedora 39
              test: fedora39
            - name: Ubuntu 20.04
              test: ubuntu2004
            - name: Alpine 3.19
              test: alpine319
          groups:
            - 4
            - 5
  - stage: Docker_2_16
    displayName: Docker 2.16
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.16/linux/{0}
          targets:
            - name: Fedora 38
              test: fedora38
            - name: CentOS 7
              test: centos7
            - name: openSUSE 15
              test: opensuse15
            - name: Alpine 3
              test: alpine3
          groups:
            - 4
            - 5

### Community Docker
  - stage: Docker_community_devel
    displayName: Docker (community images) devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux-community/{0}
          targets:
            - name: Debian Bullseye
              test: debian-bullseye/3.9
            - name: Debian Bookworm
              test: debian-bookworm/3.11
            - name: ArchLinux
              test: archlinux/3.13
          groups:
            - 4
            - 5

### Remote
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: RHEL 9.5 with Docker SDK, urllib3, requests from sources
              test: rhel/9.5-dev-latest
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_18
    displayName: Remote 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.18/{0}
          targets:
            - name: RHEL 9.4
              test: rhel/9.4
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_17
    displayName: Remote 2.17
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.17/{0}
          targets:
            - name: RHEL 9.3
              test: rhel/9.3
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_16
    displayName: Remote 2.16
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.16/{0}
          targets:
            - name: RHEL 9.2
              test: rhel/9.2
            # Currently always hangs in group 2
            # - name: RHEL 8.8
            #   test: rhel/8.8
            - name: RHEL 7.9
              test: rhel/7.9
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5

## Finally

  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Ansible_devel
      - Ansible_2_18
      - Ansible_2_17
      - Ansible_2_16
      - Remote_devel
      - Remote_2_18
      - Remote_2_17
      - Remote_2_16
      - Docker_devel
      - Docker_2_18
      - Docker_2_17
      - Docker_2_16
      - Docker_community_devel
    jobs:
      - template: templates/coverage.yml
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi
@@ -0,0 +1,64 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
@@ -0,0 +1,105 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs, can all be uploaded.
"""

import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request


@dataclasses.dataclass(frozen=True)
class CoverageFile:
    name: str
    path: pathlib.Path
    flags: t.List[str]


@dataclasses.dataclass(frozen=True)
class Args:
    dry_run: bool
    path: pathlib.Path


def parse_args() -> Args:
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('path', type=pathlib.Path)

    args = parser.parse_args()

    # Store arguments in a typed dataclass
    fields = dataclasses.fields(Args)
    kwargs = {field.name: getattr(args, field.name) for field in fields}

    return Args(**kwargs)


def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
    processed = []
    for file in directory.joinpath('reports').glob('coverage*.xml'):
        name = file.stem.replace('coverage=', '')

        # Get flags from name
        flags = name.replace('-powershell', '').split('=')  # Drop '-powershell' suffix
        flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags]  # Remove "-01" from stub files

        processed.append(CoverageFile(name, file, flags))

    return tuple(processed)


def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
    for file in files:
        cmd = [
            str(codecov_bin),
            '--name', file.name,
            '--file', str(file.path),
        ]
        for flag in file.flags:
            cmd.extend(['--flags', flag])

        if dry_run:
            print(f'DRY-RUN: Would run command: {cmd}')
            continue

        subprocess.run(cmd, check=True)


def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
    if dry_run:
        print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
        return

    with urllib.request.urlopen(url) as resp:
        with dest.open('w+b') as f:
            # Read data in chunks rather than all at once
            shutil.copyfileobj(resp, f, 64 * 1024)

    dest.chmod(flags)


def main():
    args = parse_args()
    url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
    with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
        codecov_bin = pathlib.Path(tmpdir) / 'codecov'
        download_file(url, codecov_bin, 0o755, args.dry_run)

        files = process_files(args.path)
        upload_files(codecov_bin, files, args.dry_run)


if __name__ == '__main__':
    main()
@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
@@ -0,0 +1,29 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
@@ -0,0 +1,34 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true
@@ -0,0 +1,59 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
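The stages in azure-pipelines.yml above call this template; as a self-contained illustration, a caller that multiplies two targets by two groups expands to four jobs under the default name/test group formats. A minimal sketch (the target names are illustrative):

```yaml
# Hypothetical caller: expands to jobs "Fedora 41 - 4", "Fedora 41 - 5",
# "Ubuntu 22.04 - 4" and "Ubuntu 22.04 - 5", with tests such as devel/linux/fedora41/4.
jobs:
  - template: templates/matrix.yml
    parameters:
      testFormat: devel/linux/{0}
      targets:
        - name: Fedora 41
          test: fedora41
        - name: Ubuntu 22.04
          test: ubuntu2204
      groups:
        - 4
        - 5
```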
@@ -0,0 +1,49 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided list of jobs to create one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "job" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
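When the matrix expansion is not needed, this template can be called directly with a pre-expanded job list, as its comments note; a minimal sketch:

```yaml
# Hypothetical direct invocation with a single job; "test" feeds both the
# generated job identifier and the argument passed to run-tests.sh.
jobs:
  - template: templates/test.yml
    parameters:
      jobs:
        - name: Sanity devel
          test: devel/sanity/1
```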

projet00/collections/ansible_collections/community/docker/.github/dependabot.yml  (new file, vendored, 15 lines)
@@ -0,0 +1,15 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    groups:
      ci:
        patterns:
          - "*"

projet00/collections/ansible_collections/community/docker/.github/patchback.yml  (new file, vendored, 9 lines)
@@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

backport_branch_prefix: patchback/backports/
backport_label_prefix: backport-
target_branch_prefix: stable-
...

projet00/collections/ansible_collections/community/docker/.github/workflows/ansible-test.yml  (new file, vendored, 131 lines)
@@ -0,0 +1,131 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
# https://github.com/marketplace/actions/ansible-test

name: EOL CI
on:
  # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run EOL CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'

concurrency:
  # Make sure there is at most one active run per PR, but do not cancel any non-PR runs
  group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
  cancel-in-progress: true

jobs:
  sanity:
    name: EOL Sanity (Ⓐ${{ matrix.ansible }})
    strategy:
      matrix:
        ansible:
          - '2.15'
    runs-on: ubuntu-latest
    steps:
      - name: Perform sanity testing
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
          ansible-core-version: stable-${{ matrix.ansible }}
          codecov-token: ${{ secrets.CODECOV_TOKEN }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          pull-request-change-detection: 'true'
          testing-type: sanity
          pre-test-cmd: >-
            git clone --depth=1 --single-branch --branch stable-1 https://github.com/ansible-collections/community.library_inventory_filtering.git ../../community/library_inventory_filtering_v1

  units:
    runs-on: ubuntu-latest
    name: EOL Units (Ⓐ${{ matrix.ansible }})
    strategy:
      # As soon as the first unit test fails, cancel the others to free up the CI queue
      fail-fast: true
      matrix:
        ansible:
          - '2.15'

    steps:
      - name: Perform unit testing against Ansible version ${{ matrix.ansible }}
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
          ansible-core-version: stable-${{ matrix.ansible }}
          codecov-token: ${{ secrets.CODECOV_TOKEN }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          pull-request-change-detection: 'true'
          testing-type: units
          pre-test-cmd: >-
            git clone --depth=1 --single-branch --branch stable-1 https://github.com/ansible-collections/community.library_inventory_filtering.git ../../community/library_inventory_filtering_v1
            ;
            git clone --depth=1 --single-branch --branch main https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools

  integration:
    runs-on: ubuntu-latest
    name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
    strategy:
      fail-fast: false
      matrix:
        ansible:
          - ''
        docker:
          - ''
        python:
          - ''
        target:
          - ''
        extra-constraints:
          # Specifying this other than '' likely destroys change detection, but at least it will make
          # CI pass when necessary...
          - ''
        exclude:
          - ansible: ''
        include:
          # 2.15
          - ansible: '2.15'
            docker: fedora37
            python: ''
            target: azp/4/
          - ansible: '2.15'
            docker: fedora37
            python: ''
            target: azp/5/

    steps:
      - name: Perform integration testing against Ansible version ${{ matrix.ansible }} under Python ${{ matrix.python }}
        uses: felixfontein/ansible-test-gh-action@main
        with:
          ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
          ansible-core-version: stable-${{ matrix.ansible }}
          codecov-token: ${{ secrets.CODECOV_TOKEN }}
          coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
          docker-image: ${{ matrix.docker }}
          integration-continue-on-error: 'false'
          integration-diff: 'false'
          integration-retry-on-error: 'true'
          pre-test-cmd: >-
            mkdir -p ../../ansible
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
            ;
            git clone --depth=1 --single-branch https://github.com/ansible-collections/community.general.git ../../community/general
            ;
            git clone --depth=1 --single-branch --branch stable-1 https://github.com/ansible-collections/community.library_inventory_filtering.git ../../community/library_inventory_filtering_v1
            ${{ matrix.extra-constraints && format('; echo ''{0}'' >> tests/utils/constraints.txt', matrix.extra-constraints) || '' }}
            ;
            cat tests/utils/constraints.txt
          pull-request-change-detection: 'true'
          target: ${{ matrix.target }}
          target-python-version: ${{ matrix.python }}
          testing-type: integration

projet00/collections/ansible_collections/community/docker/.github/workflows/docker-images.yml  (new file, vendored, 90 lines)
@@ -0,0 +1,90 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Helper Docker images for testing
on:
  # Run CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  # Run CI once per day (at 03:00 UTC)
  schedule:
    - cron: '0 3 * * *'

env:
  CONTAINER_REGISTRY: ghcr.io/ansible-collections

jobs:
  build:
    name: Build image ${{ matrix.name }}:${{ matrix.tag }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: simple-1
            tag: tag
            tag-as-latest: true
          - name: simple-2
            tag: tag
            tag-as-latest: true
          - name: healthcheck
            tag: check
            tag-as-latest: true

    steps:
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Install dependencies
        run: |
          sudo apt-get install podman buildah

      - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'
          cache: false  # true (default) results in warnings since we don't use Go modules

      - name: Build ${{ matrix.name }} image
        run: |
          ./build.sh "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}"
        working-directory: tests/images/${{ matrix.name }}

      - name: Tag image as latest
        if: matrix.tag-as-latest && matrix.tag != 'latest'
        run: |
          podman tag "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}" "${CONTAINER_REGISTRY}/${{ matrix.name }}:latest"

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:${{ matrix.tag }}
        if: github.event_name != 'pull_request'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: ${{ matrix.tag }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:latest
        if: github.event_name != 'pull_request' && matrix.tag-as-latest && matrix.tag != 'latest'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: latest
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

projet00/collections/ansible_collections/community/docker/.github/workflows/docs-pr.yml  (new file, vendored, 96 lines)
@@ -0,0 +1,96 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-pr-${{ github.head_ref }}
  cancel-in-progress: true
on:
  pull_request_target:
    types: [opened, synchronize, reopened, closed]

env:
  GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      render-file-line: '> * `$<status>` [$<path_tail>](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/pr/${{ github.event.number }}/$<path_tail>)'
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      action: ${{ (github.event.action == 'closed' || needs.build-docs.outputs.changed != 'true') && 'teardown' || 'publish' }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  comment:
    permissions:
      pull-requests: write
    runs-on: ubuntu-latest
    needs: [build-docs, publish-docs-gh-pages]
    name: PR comments
    steps:
      - name: PR comment
        uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main
        with:
          body-includes: '## Docs Build'
          reactions: heart
          action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }}
          on-closed-body: |
            ## Docs Build 📝

            This PR is closed and any previously published docsite has been unpublished.
          on-merged-body: |
            ## Docs Build 📝

            Thank you for your contribution! ✨

            This PR has been merged and the docs are now incorporated into `main`:
            ${{ env.GHP_BASE_URL }}/branch/main
          body: |
            ## Docs Build 📝

            Thank you for your contribution! ✨

            The docs for **this PR** have been published here:
            ${{ env.GHP_BASE_URL }}/pr/${{ github.event.number }}

            You can compare to the docs for the `main` branch here:
            ${{ env.GHP_BASE_URL }}/branch/main

            The docsite for **this PR** is also available for download as an artifact from this run:
            ${{ needs.build-docs.outputs.artifact-url }}

            File changes:

            ${{ needs.build-docs.outputs.diff-files-rendered }}

            ${{ needs.build-docs.outputs.diff-rendered }}

projet00/collections/ansible_collections/community/docker/.github/workflows/docs-push.yml  (new file, vendored, 56 lines)
@@ -0,0 +1,56 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-push-${{ github.sha }}
  cancel-in-progress: true
on:
  push:
    branches:
      - main
      - stable-*
    tags:
      - '*'
  # Run CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'
  # Allow manual trigger (for newer antsibull-docs, sphinx-ansible-theme, ... versions)
  workflow_dispatch:

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

projet00/collections/ansible_collections/community/docker/.github/workflows/ee.yml  (new file, vendored, 163 lines)
@ -0,0 +1,163 @@
|
||||
---
|
||||
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: execution environment
on:
  # Run CI against all pushes (direct commits, also merged PRs) and Pull Requests
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run CI once per day (at 04:30 UTC)
  # This ensures that even if there haven't been commits, we are still testing against the latest version of ansible-builder
  schedule:
    - cron: '30 4 * * *'

env:
  NAMESPACE: community
  COLLECTION_NAME: docker

jobs:
  build:
    name: Build and test EE (${{ matrix.name }})
    strategy:
      fail-fast: false
      matrix:
        name:
          - ''
        ansible_core:
          - ''
        ansible_runner:
          - ''
        base_image:
          - ''
        pre_base:
          - ''
        extra_vars:
          - ''
        other_deps:
          - ''
        exclude:
          - ansible_core: ''
        include:
          - name: ansible-core devel @ RHEL UBI 9
            ansible_core: https://github.com/ansible/ansible/archive/devel.tar.gz
            ansible_runner: ansible-runner
            other_deps: |2
                python_interpreter:
                  package_system: python3.11 python3.11-pip python3.11-wheel python3.11-cryptography
                  python_path: "/usr/bin/python3.11"
            base_image: docker.io/redhat/ubi9:latest
            pre_base: '"#"'
          - name: ansible-core 2.15 @ Rocky Linux 9
            ansible_core: https://github.com/ansible/ansible/archive/stable-2.15.tar.gz
            ansible_runner: ansible-runner
            base_image: quay.io/rockylinux/rockylinux:9
            pre_base: '"#"'
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          path: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
          persist-credentials: false

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install ansible-builder and ansible-navigator
        run: pip install ansible-builder ansible-navigator

      - name: Verify requirements
        run: ansible-builder introspect --sanitize .

      - name: Make sure galaxy.yml has version entry
        run: >-
          python -c
          'import yaml ;
          f = open("galaxy.yml", "rb") ;
          data = yaml.safe_load(f) ;
          f.close() ;
          data["version"] = data.get("version") or "0.0.1" ;
          f = open("galaxy.yml", "wb") ;
          f.write(yaml.dump(data).encode("utf-8")) ;
          f.close() ;
          '
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}

      - name: Build collection
        run: |
          ansible-galaxy collection build --output-path ../../../
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}

      - name: Create files for building execution environment
        run: |
          COLLECTION_FILENAME="$(ls "${NAMESPACE}-${COLLECTION_NAME}"-*.tar.gz)"

          # EE config
          cat > execution-environment.yml <<EOF
          ---
          version: 3
          dependencies:
            ansible_core:
              package_pip: ${{ matrix.ansible_core }}
            ansible_runner:
              package_pip: ${{ matrix.ansible_runner }}
            galaxy: requirements.yml
          ${{ matrix.other_deps }}

          images:
            base_image:
              name: ${{ matrix.base_image }}

          additional_build_files:
            - src: ${COLLECTION_FILENAME}
              dest: src

          additional_build_steps:
            prepend_base:
              - ${{ matrix.pre_base }}
          EOF
          echo "::group::execution-environment.yml"
          cat execution-environment.yml
          echo "::endgroup::"

          # Requirements
          cat > requirements.yml <<EOF
          ---
          collections:
            - name: src/${COLLECTION_FILENAME}
              type: file
          EOF
          echo "::group::requirements.yml"
          cat requirements.yml
          echo "::endgroup::"

      - name: Build image based on ${{ matrix.base_image }}
        run: |
          ansible-builder build --verbosity 3 --tag test-ee:latest --container-runtime docker

      - name: Show images
        run: docker image ls

      - name: Make /var/run/docker.sock accessible by everyone
        run: sudo chmod a+rw /var/run/docker.sock

      - name: Run basic tests
        run: >
          ansible-navigator run
          --mode stdout
          --container-engine docker
          --container-options=-v --container-options=/var/run/docker.sock:/var/run/docker.sock
          --pull-policy never
          --set-environment-variable ANSIBLE_PRIVATE_ROLE_VARS=true
          --execution-environment-image test-ee:latest
          -v
          all.yml
          ${{ matrix.extra_vars }}
        working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}/tests/ee
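The same steps can be reproduced outside CI when debugging an execution environment build. A minimal sketch, assuming Docker is running locally, the collection is checked out, and an `execution-environment.yml` like the one generated above exists in the working directory (`test-ee:latest` is just an arbitrary local tag):

```bash
# Mirror the CI steps: build an EE image and smoke-test it.
pip install ansible-builder ansible-navigator
ansible-builder build --verbosity 3 --tag test-ee:latest --container-runtime docker
cd tests/ee
ansible-navigator run --mode stdout \
  --container-engine docker --pull-policy never \
  --execution-environment-image test-ee:latest \
  all.yml
```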
20 projet00/collections/ansible_collections/community/docker/.github/workflows/import-galaxy.yml vendored Normal file
@ -0,0 +1,20 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: import-galaxy
'on':
  # Run CI against all pushes (direct commits, also merged PRs) to main, and all Pull Requests
  push:
    branches:
      - main
      - stable-*
  pull_request:

jobs:
  import-galaxy:
    permissions:
      contents: read
    name: Test to import built collection artifact with Galaxy importer
    uses: ansible-community/github-action-test-galaxy-import/.github/workflows/test-galaxy-import.yml@main
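The shared workflow builds the collection artifact and runs the Galaxy importer against it. A rough local equivalent, assuming the `galaxy-importer` package (the importer version used by the shared workflow may differ):

```bash
# Build the artifact, then run the same checks Galaxy applies on upload.
pip install galaxy-importer
ansible-galaxy collection build
python -m galaxy_importer.main community-docker-*.tar.gz
```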
33 projet00/collections/ansible_collections/community/docker/.github/workflows/reuse.yml vendored Normal file
@ -0,0 +1,33 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Verify REUSE

on:
  push:
    branches:
      - main
      - stable-*
  pull_request:
    branches:
      - main
      - stable-*
  # Run CI once per day (at 04:30 UTC)
  schedule:
    - cron: '30 4 * * *'

jobs:
  check:
    permissions:
      contents: read
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: REUSE Compliance Check
        uses: fsfe/reuse-action@v5
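The same compliance check can be run locally with the `reuse` CLI that backs `fsfe/reuse-action`; a sketch, assuming a Python environment in the collection checkout:

```bash
# Lint the tree for REUSE/SPDX license-header compliance, as the CI job does.
pip install reuse
reuse lint
```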
@ -0,0 +1,5 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/

Files: changelogs/fragments/*
Copyright: Ansible Project
License: GPL-3.0-or-later
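Each block in this file is a stanza of the Debian copyright-format 1.0 named on the `Format:` line, so covering more files is just a matter of adding stanzas. A hypothetical example for another path (the `tests/ee/*` pattern is only illustrative):

```
Files: tests/ee/*
Copyright: Ansible Project
License: GPL-3.0-or-later
```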
File diff suppressed because it is too large
@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
File diff suppressed because it is too large
@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
@ -0,0 +1,674 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

                            Preamble

The GNU General Public License is a free, copyleft license for
software and other kinds of works.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.

Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program> Copyright (C) <year> <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
6571 projet00/collections/ansible_collections/community/docker/FILES.json Normal file
File diff suppressed because it is too large
@ -0,0 +1,191 @@

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

    (a) You must give any other recipients of the Work or
    Derivative Works a copy of this License; and

    (b) You must cause any modified files to carry prominent notices
    stating that You changed the files; and

    (c) You must retain, in the Source form of any Derivative Works
    that You distribute, all copyright, patent, trademark, and
    attribution notices from the Source form of the Work,
    excluding those notices that do not pertain to any part of
    the Derivative Works; and

    (d) If the Work includes a "NOTICE" text file as part of its
    distribution, then any Derivative Works that You distribute must
    include a readable copy of the attribution notices contained
    within such NOTICE file, excluding those notices that do not
    pertain to any part of the Derivative Works, in at least one
    of the following places: within a NOTICE text file distributed
    as part of the Derivative Works; within the Source form or
    documentation, if provided along with the Derivative Works; or,
    within a display generated by the Derivative Works, if and
    wherever such third-party notices normally appear. The contents
    of the NOTICE file are for informational purposes only and
    do not modify the License. You may add Your own attribution
    notices within Derivative Works that You distribute, alongside
    or as an addendum to the NOTICE text from the Work, provided
    that such additional attribution notices cannot be construed
    as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -0,0 +1 @@
../COPYING
@ -0,0 +1,35 @@
{
  "collection_info": {
    "namespace": "community",
    "name": "docker",
    "version": "4.5.2",
    "authors": [
      "Ansible Docker Working Group"
    ],
    "readme": "README.md",
    "tags": [
      "docker"
    ],
    "description": "Modules and plugins for working with Docker",
    "license": [
      "GPL-3.0-or-later",
      "Apache-2.0"
    ],
    "license_file": null,
    "dependencies": {
      "community.library_inventory_filtering_v1": ">=1.0.0"
    },
    "repository": "https://github.com/ansible-collections/community.docker",
    "documentation": "https://docs.ansible.com/ansible/latest/collections/community/docker/",
    "homepage": "https://github.com/ansible-collections/community.docker",
    "issues": "https://github.com/ansible-collections/community.docker/issues"
  },
  "file_manifest_file": {
    "name": "FILES.json",
    "ftype": "file",
    "chksum_type": "sha256",
    "chksum_sha256": "adef767a9f10155e416ebbad2bd9e07fc820fafd54659cb29722209ec7b3e857",
    "format": 1
  },
  "format": 1
}
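The `chksum_sha256` under `file_manifest_file` lets you confirm that the shipped `FILES.json` is intact. A quick check, assuming a shell inside an unpacked collection artifact:

```bash
# The digest printed here should match chksum_sha256 in MANIFEST.json.
sha256sum FILES.json
```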
@ -0,0 +1,164 @@
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

# Docker Community Collection

[Documentation](https://docs.ansible.com/ansible/devel/collections/community/docker/)
[CI (Azure Pipelines)](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
[CI (GitHub Actions)](https://github.com/ansible-collections/community.docker/actions)
[Codecov](https://codecov.io/gh/ansible-collections/community.docker)
[REUSE status](https://api.reuse.software/info/github.com/ansible-collections/community.docker)

This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.

Please note that this collection does **not** support Windows targets. The connection plugins included in this collection support Windows targets on a best-effort basis, but we are not testing this in CI.

## Code of Conduct

We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.

If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.

## Communication

* Join the Ansible forum:
  * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. Please add appropriate tags if you start new discussions, for example the `docker`, `docker-compose`, or `docker-swarm` tags.
  * [Posts tagged with 'docker'](https://forum.ansible.com/tag/docker): subscribe to participate in Docker related conversations.
  * [Posts tagged with 'docker-compose'](https://forum.ansible.com/tag/docker-compose): subscribe to participate in Docker Compose related conversations.
  * [Posts tagged with 'docker-swarm'](https://forum.ansible.com/tag/docker-swarm): subscribe to participate in Docker Swarm related conversations.
  * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts.
  * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events.

* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes.

For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).

## Tested with Ansible

Tested with the current ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, and ansible-core 2.18 releases, and the current development version of ansible-core. Ansible/ansible-base versions before 2.15.0 are not supported.

## External requirements

Some modules and plugins require the Docker CLI or other external programs. Some require the [Docker SDK for Python](https://pypi.org/project/docker/) and some use [requests](https://pypi.org/project/requests/) to communicate directly with the Docker daemon API. All modules and plugins require Python 2.7 or later. Python 2.6 is no longer supported; use community.docker 2.x.y if you need to use Python 2.6.
|
||||
|
||||
Installing the Docker SDK for Python also installs the requirements for the modules and plugins that use `requests`. If you want to directly install the Python libraries instead of the SDK, you need the following ones:
|
||||
|
||||
- [requests](https://pypi.org/project/requests/);
|
||||
- [pywin32](https://pypi.org/project/pywin32/) when using named pipes on Windows with the Windows 32 API;
|
||||
- [paramiko](https://pypi.org/project/paramiko/) when using SSH to connect to the Docker daemon with `use_ssh_client=false`;
|
||||
- [pyOpenSSL](https://pypi.org/project/pyOpenSSL/) when using TLS to connect to the Docker daemon;
|
||||
- [backports.ssl_match_hostname](https://pypi.org/project/backports.ssl_match_hostname/) when using TLS to connect to the Docker daemon on Python 2.
|
||||
|
||||
If you have Docker SDK for Python < 2.0.0 installed ([docker-py](https://pypi.org/project/docker-py/)), you can still use it for modules that support it, though we recommend to uninstall it and then install [docker](https://pypi.org/project/docker/), the Docker SDK for Python >= 2.0.0. Note that both libraries cannot be installed at the same time. If you accidentally did install them simultaneously, you have to uninstall *both* before re-installing one of them.
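For convenience, the SDK can also be installed with Ansible itself. A minimal sketch using the built-in `pip` module (the `paramiko` entry is only an illustrative extra for SSH connections):

```yaml
- name: Install the Docker SDK for Python (also pulls in requests)
  ansible.builtin.pip:
    name:
      - docker
      - paramiko  # example extra: only needed for SSH with use_ssh_client=false
```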

## Collection Documentation

Browsing the [**latest** collection documentation](https://docs.ansible.com/ansible/latest/collections/community/docker) will show docs for the _latest version released in the Ansible package_, not the latest version of the collection released on Galaxy.

Browsing the [**devel** collection documentation](https://docs.ansible.com/ansible/devel/collections/community/docker) shows docs for the _latest version released on Galaxy_.

We also separately publish [**latest commit** collection documentation](https://ansible-collections.github.io/community.docker/branch/main/) which shows docs for the _latest commit in the `main` branch_.

If you use the Ansible package and do not update collections independently, use **latest**. If you install or update this collection directly from Galaxy, use **devel**. If you are looking to contribute, use **latest commit**.

## Included content

* Connection plugins:
  - community.docker.docker: use Docker containers as remotes using the Docker CLI program
  - community.docker.docker_api: use Docker containers as remotes using the Docker API
  - community.docker.nsenter: execute commands on the host running the controller container
* Inventory plugins:
  - community.docker.docker_containers: dynamic inventory plugin for Docker containers
  - community.docker.docker_machine: collect Docker machines as inventory
  - community.docker.docker_swarm: collect Docker Swarm nodes as inventory
* Modules:
  * Docker:
    - community.docker.docker_container: manage Docker containers
    - community.docker.docker_container_copy_into: copy a file into a Docker container
    - community.docker.docker_container_exec: run commands in Docker containers
    - community.docker.docker_container_info: retrieve information on Docker containers
    - community.docker.docker_host_info: retrieve information on the Docker daemon
    - community.docker.docker_image: manage Docker images
    - community.docker.docker_image_build: build Docker images using Docker buildx
    - community.docker.docker_image_export: export (archive) Docker images
    - community.docker.docker_image_info: retrieve information on Docker images
    - community.docker.docker_image_load: load Docker images from archives
    - community.docker.docker_image_pull: pull Docker images from registries
    - community.docker.docker_image_push: push Docker images to registries
    - community.docker.docker_image_remove: remove Docker images
    - community.docker.docker_image_tag: tag Docker images with new names and/or tags
    - community.docker.docker_login: log in and out to/from registries
    - community.docker.docker_network: manage Docker networks
    - community.docker.docker_network_info: retrieve information on Docker networks
    - community.docker.docker_plugin: manage Docker plugins
    - community.docker.docker_prune: prune Docker containers, images, networks, volumes, and build data
    - community.docker.docker_volume: manage Docker volumes
    - community.docker.docker_volume_info: retrieve information on Docker volumes
  * Docker Compose:
    - community.docker.docker_compose_v2: manage Docker Compose files (Docker compose CLI plugin)
    - community.docker.docker_compose_v2_exec: run command in a container of a Compose service
    - community.docker.docker_compose_v2_pull: pull a Docker compose project
    - community.docker.docker_compose_v2_run: run command in a new container of a Compose service
  * Docker Swarm:
    - community.docker.docker_config: manage configurations
    - community.docker.docker_node: manage Docker Swarm nodes
    - community.docker.docker_node_info: retrieve information on Docker Swarm nodes
    - community.docker.docker_secret: manage secrets
    - community.docker.docker_swarm: manage Docker Swarm
    - community.docker.docker_swarm_info: retrieve information on Docker Swarm
    - community.docker.docker_swarm_service: manage Docker Swarm services
    - community.docker.docker_swarm_service_info: retrieve information on Docker Swarm services
  * Docker Stack:
    - community.docker.docker_stack: manage Docker Stacks
    - community.docker.docker_stack_info: retrieve information on Docker Stacks
    - community.docker.docker_stack_task_info: retrieve information on tasks in Docker Stacks
  * Other:
    - current_container_facts: return facts about whether the module runs in a Docker container

## Using this collection

Before using the Docker community collection, you need to install the collection with the `ansible-galaxy` CLI:

    ansible-galaxy collection install community.docker

You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:

```yaml
collections:
  - name: community.docker
```

See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
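Once installed, content is referenced by its fully qualified collection name. A minimal playbook sketch (container and image names are placeholders):

```yaml
- hosts: localhost
  tasks:
    - name: Run an nginx container (placeholder names)
      community.docker.docker_container:
        name: web
        image: nginx:1.27
        state: started
        ports:
          - "8080:80"
```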

## Contributing to this collection

If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.

You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).

## Release notes

See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.md).

## More information

- [Ansible Collection overview](https://github.com/ansible-collections/overview)
- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)

## Licensing

This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.

See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.docker/blob/main/COPYING) for the full text.

Parts of the collection are licensed under the [Apache 2.0 license](https://github.com/ansible-collections/community.docker/blob/main/LICENSES/Apache-2.0.txt). This mostly applies to files vendored from the [Docker SDK for Python](https://github.com/docker/docker-py/).

All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).
@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
@ -0,0 +1,41 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
keep_fragments: false
mention_ancestor: true
new_plugins_after_name: removed_features
notesdir: fragments
output_formats:
  - md
  - rst
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
  - - major_changes
    - Major Changes
  - - minor_changes
    - Minor Changes
  - - breaking_changes
    - Breaking Changes / Porting Guide
  - - deprecated_features
    - Deprecated Features
  - - removed_features
    - Removed Features (previously deprecated)
  - - security_fixes
    - Security Fixes
  - - bugfixes
    - Bugfixes
  - - known_issues
    - Known Issues
title: Docker Community Collection
trivial_section_name: trivial
use_fqcn: true
add_plugin_period: true
changelog_nice_yaml: true
changelog_sort: version
@ -0,0 +1,18 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# The following `.. envvar::` directives are defined in the extra docsite docs:
envvar_directives:
  - DOCKER_HOST
  - DOCKER_API_VERSION
  - DOCKER_TIMEOUT
  - DOCKER_CERT_PATH
  - DOCKER_SSL_VERSION
  - DOCKER_TLS
  - DOCKER_TLS_HOSTNAME
  - DOCKER_TLS_VERIFY

changelog:
  write_changelog: true
@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

sections:
  - title: Scenario Guide
    toctree:
      - scenario_guide
@ -0,0 +1,43 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

edit_on_github:
  repository: ansible-collections/community.docker
  branch: main
  path_prefix: ''

extra_links:
  - description: Ask for help (Docker)
    url: https://forum.ansible.com/tags/c/help/6/none/docker
  - description: Ask for help (Docker Compose)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-compose
  - description: Ask for help (Docker Swarm)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-swarm
  - description: Submit a bug report
    url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md
  - description: Request a feature
    url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=feature_request.md

communication:
  matrix_rooms:
    - topic: General usage and support questions
      room: '#users:ansible.im'
  irc_channels:
    - topic: General usage and support questions
      network: Libera
      channel: '#ansible'
  forums:
    - topic: "Ansible Forum: General usage and support questions"
      # The following URL directly points to the "Get Help" section
      url: https://forum.ansible.com/c/help/6/none
    - topic: "Ansible Forum: Discussions about Docker"
      # The following URL directly points to the "docker" tag
      url: https://forum.ansible.com/tag/docker
    - topic: "Ansible Forum: Discussions about Docker Compose"
      # The following URL directly points to the "docker-compose" tag
      url: https://forum.ansible.com/tag/docker-compose
    - topic: "Ansible Forum: Discussions about Docker Swarm"
      # The following URL directly points to the "docker-swarm" tag
      url: https://forum.ansible.com/tag/docker-swarm
@ -0,0 +1,335 @@
..
  Copyright (c) Ansible Project
  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
  SPDX-License-Identifier: GPL-3.0-or-later

.. _ansible_collections.community.docker.docsite.scenario_guide:

Docker Guide
============

The `community.docker collection <https://galaxy.ansible.com/ui/repo/published/community/docker/>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.

.. contents::
   :local:
   :depth: 1


Requirements
------------

Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ansplugin:`community.general.python_requirements_info module <community.general.python_requirements_info#module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
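For example, such a check could look like this sketch (the ``docker`` requirement entry is the only assumption here):

.. code-block:: yaml+jinja

   - name: Check whether the Docker SDK for Python is available
     community.general.python_requirements_info:
       dependencies:
         - docker
     register: sdk_info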

Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``.

You can install the Docker SDK for Python for Python 3.6 or later as follows:

.. code-block:: bash

   $ pip install docker

For Python 2.7, you need to use a version between 2.0.0 and 4.4.4, since the Python package for Docker removed support for Python 2.7 in version 5.0.0. You can install the specific version of the Docker SDK for Python as follows:

.. code-block:: bash

   $ pip install 'docker==4.4.4'

Note that the Docker SDK for Python was called ``docker-py`` on PyPI before version 2.0.0. Please avoid installing this really old version, and make sure to not install both ``docker`` and ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version. If in doubt, always install ``docker`` and never ``docker-py``.


Connecting to the Docker API
----------------------------

You can connect to a local or remote API using parameters passed to each task or by setting environment variables. The order of precedence is command line parameters and then environment variables. If neither a command line option nor an environment variable is found, Ansible uses the default value provided under `Parameters`_.


Parameters
..........

Most plugins and modules can be configured by the following parameters:

docker_host
    The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix:///var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace ``tcp`` in the connection URL with ``https``.

api_version
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python installed.

timeout
    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.

tls
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to ``false``.

validate_certs
    Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Defaults to ``false``.

ca_path
    Use a CA certificate when performing server verification by providing the path to a CA certificate file.

cert_path
    Path to the client's TLS certificate file.

key_path
    Path to the client's TLS key file.

tls_hostname
    When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to ``localhost``.

ssl_version
    Provide a valid SSL version number. The default value is determined by the Docker SDK for Python.

    This option is not available for the CLI-based plugins. It is mainly needed for legacy systems and should be avoided.
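Putting the parameters together, a single task talking to a remote, TLS-protected daemon might look roughly like this sketch (the host and paths are placeholders):

.. code-block:: yaml+jinja

   - name: Retrieve information on the remote Docker daemon
     community.docker.docker_host_info:
       docker_host: tcp://192.0.2.23:2376
       tls: true
       validate_certs: true
       ca_path: /path/to/cacert.pem
       timeout: 120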


Module default group
....................

To avoid having to specify common parameters for all the modules in every task, you can use the ``community.docker.docker`` :ref:`module defaults group <module_defaults_groups>`, or its short name ``docker``.

.. note::

   Module default groups only work for modules, not for plugins (connection and inventory plugins).

The following example shows how the module default group can be used in a playbook:

.. code-block:: yaml+jinja

   ---
   - name: Pull an image and start the container
     hosts: localhost
     gather_facts: false
     module_defaults:
       group/community.docker.docker:
         # Select Docker Daemon on other host
         docker_host: tcp://192.0.2.23:2376
         # Configure TLS
         tls: true
         validate_certs: true
         tls_hostname: docker.example.com
         ca_path: /path/to/cacert.pem
         # Increase timeout
         timeout: 120
     tasks:
       - name: Pull image
         community.docker.docker_image_pull:
           name: python
           tag: 3.12

       - name: Start container
         community.docker.docker_container:
           cleanup: true
           command: python --version
           detach: false
           image: python:3.12
           name: my-python-container
           output_logs: true
         register: output

       - name: Show output
         ansible.builtin.debug:
           msg: "{{ output.container.Output }}"

Here the two ``community.docker`` tasks will use the options set for the module defaults group.


Environment variables
.....................

You can also control how the plugins and modules connect to the Docker API by setting the following environment variables. A short task-level sketch follows the list below.

For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user the modules are executed as.

.. envvar:: DOCKER_HOST

   The URL or Unix socket path used to connect to the Docker API.

.. envvar:: DOCKER_API_VERSION

   The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python.

.. envvar:: DOCKER_TIMEOUT

   The maximum amount of time in seconds to wait on a response from the API.

.. envvar:: DOCKER_CERT_PATH

   Path to the directory containing the client certificate, client key and CA certificate.

.. envvar:: DOCKER_SSL_VERSION

   Provide a valid SSL version number.

.. envvar:: DOCKER_TLS

   Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.

.. envvar:: DOCKER_TLS_HOSTNAME

   When verifying the authenticity of the Docker Host, use this hostname to compare to the host's certificate.

.. envvar:: DOCKER_TLS_VERIFY

   Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
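As a sketch, these variables can also be scoped to a single task with Ansible's ``environment`` keyword (all values here are placeholders):

.. code-block:: yaml+jinja

   - name: Query a remote daemon configured via environment variables
     community.docker.docker_host_info:
     environment:
       DOCKER_HOST: tcp://192.0.2.23:2376
       DOCKER_TLS_VERIFY: "1"
       DOCKER_CERT_PATH: /path/to/certs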


Plain Docker daemon: images, networks, volumes, and containers
--------------------------------------------------------------

For working with a plain Docker daemon, that is without Swarm, there are connection plugins, an inventory plugin, and several modules available:

docker connection plugin
    The :ansplugin:`community.docker.docker connection plugin <community.docker.docker#connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ansplugin:`ansible.posix.synchronize module <ansible.posix.synchronize#module>`.

docker_api connection plugin
    The :ansplugin:`community.docker.docker_api connection plugin <community.docker.docker_api#connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.

docker_containers inventory plugin
    The :ansplugin:`community.docker.docker_containers inventory plugin <community.docker.docker_containers#inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories.

    The `docker inventory script <https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source.
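For instance, a minimal inventory source for the plugin could look like the following sketch (the file name is a placeholder, but it must end in ``docker.yml`` or ``docker.yaml`` for the plugin to pick it up):

.. code-block:: yaml

   # inventory.docker.yml
   plugin: community.docker.docker_containers
   # Example option: connect to the containers with the docker CLI connection plugin
   connection_type: docker-cli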

docker_host_info module
    The :ansplugin:`community.docker.docker_host_info module <community.docker.docker_host_info#module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on.

docker_login module
    The :ansplugin:`community.docker.docker_login module <community.docker.docker_login#module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.

docker_prune module
    The :ansplugin:`community.docker.docker_prune module <community.docker.docker_prune#module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command.

docker_image module
    The :ansplugin:`community.docker.docker_image module <community.docker.docker_image#module>` provides full control over images, including: build, pull, push, tag and remove.

docker_image_build
    The :ansplugin:`community.docker.docker_image_build module <community.docker.docker_image_build#module>` allows you to build a Docker image using Docker buildx.

docker_image_export module
    The :ansplugin:`community.docker.docker_image_export module <community.docker.docker_image_export#module>` allows you to export (archive) images.

docker_image_info module
    The :ansplugin:`community.docker.docker_image_info module <community.docker.docker_image_info#module>` allows you to list and inspect images.

docker_image_load
    The :ansplugin:`community.docker.docker_image_load module <community.docker.docker_image_load#module>` allows you to import one or multiple images from tarballs.

docker_image_pull
    The :ansplugin:`community.docker.docker_image_pull module <community.docker.docker_image_pull#module>` allows you to pull a Docker image from a registry.

docker_image_push
    The :ansplugin:`community.docker.docker_image_push module <community.docker.docker_image_push#module>` allows you to push a Docker image to a registry.

docker_image_remove
    The :ansplugin:`community.docker.docker_image_remove module <community.docker.docker_image_remove#module>` allows you to remove and/or untag a Docker image from the Docker daemon.

docker_image_tag
    The :ansplugin:`community.docker.docker_image_tag module <community.docker.docker_image_tag#module>` allows you to tag a Docker image with additional names and/or tags.

docker_network module
    The :ansplugin:`community.docker.docker_network module <community.docker.docker_network#module>` provides full control over Docker networks.

docker_network_info module
    The :ansplugin:`community.docker.docker_network_info module <community.docker.docker_network_info#module>` allows you to inspect Docker networks.

docker_volume_info module
    The :ansplugin:`community.docker.docker_volume_info module <community.docker.docker_volume_info#module>` allows you to inspect Docker volumes.

docker_volume module
    The :ansplugin:`community.docker.docker_volume module <community.docker.docker_volume#module>` provides full control over Docker volumes.

docker_container module
    The :ansplugin:`community.docker.docker_container module <community.docker.docker_container#module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container.

docker_container_copy_into
    The :ansplugin:`community.docker.docker_container_copy_into module <community.docker.docker_container_copy_into#module>` allows you to copy files from the control node into a container.

docker_container_exec
    The :ansplugin:`community.docker.docker_container_exec module <community.docker.docker_container_exec#module>` allows you to execute commands in a running container.

docker_container_info module
    The :ansplugin:`community.docker.docker_container_info module <community.docker.docker_container_info#module>` allows you to inspect a Docker container.

docker_plugin
    The :ansplugin:`community.docker.docker_plugin module <community.docker.docker_plugin#module>` allows you to manage Docker plugins.


Docker Compose
--------------

Docker Compose v2
.................

There are two modules for working with Docker compose projects:

community.docker.docker_compose_v2
    The :ansplugin:`community.docker.docker_compose_v2 module <community.docker.docker_compose_v2#module>` allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm.

community.docker.docker_compose_v2_pull
    The :ansplugin:`community.docker.docker_compose_v2_pull module <community.docker.docker_compose_v2_pull#module>` allows you to pull Docker compose projects.

These modules use the Docker CLI "compose" plugin (``docker compose``) and thus need access to the Docker CLI tool; no further requirements besides the CLI tool and its Docker Compose plugin are needed.
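A minimal sketch of bringing up an existing Compose project (the project path is a placeholder):

.. code-block:: yaml+jinja

   - name: Bring up a Docker Compose project
     community.docker.docker_compose_v2:
       project_src: /path/to/project
       state: present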

Docker Compose v1
.................

The deprecated :ansplugin:`community.docker.docker_compose module <community.docker.docker_compose#module>` allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm. This module uses the outdated and end-of-life version 1.x of Docker Compose. It should mainly be used for legacy systems which still have to use that version of Docker Compose. **The module is deprecated and will be removed in community.docker 4.0.0.** Please use the Docker Compose v2 modules instead.

You need to install the `old Python docker-compose <https://pypi.org/project/docker-compose/>`_ on the remote machines to use the Docker Compose v1 module.


Docker Machine
--------------

The :ansplugin:`community.docker.docker_machine inventory plugin <community.docker.docker_machine#inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.


Docker Swarm stack
------------------

The :ansplugin:`community.docker.docker_stack module <community.docker.docker_stack#module>` allows you to control Docker Swarm stacks. Information on Swarm stacks can be retrieved by the :ansplugin:`community.docker.docker_stack_info module <community.docker.docker_stack_info#module>`, and information on Swarm stack tasks can be retrieved by the :ansplugin:`community.docker.docker_stack_task_info module <community.docker.docker_stack_task_info#module>`.


Docker Swarm
------------

The community.docker collection provides multiple plugins and modules for managing Docker Swarms.

Swarm management
................

One inventory plugin and several modules are provided to manage Docker Swarms:

docker_swarm inventory plugin
    The :ansplugin:`community.docker.docker_swarm inventory plugin <community.docker.docker_swarm#inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.

docker_swarm module
    The :ansplugin:`community.docker.docker_swarm module <community.docker.docker_swarm#module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.

docker_swarm_info module
    The :ansplugin:`community.docker.docker_swarm_info module <community.docker.docker_swarm_info#module>` allows you to retrieve information on Docker Swarm.

docker_node module
    The :ansplugin:`community.docker.docker_node module <community.docker.docker_node#module>` allows you to manage Docker Swarm nodes.

docker_node_info module
    The :ansplugin:`community.docker.docker_node_info module <community.docker.docker_node_info#module>` allows you to retrieve information on Docker Swarm nodes.

Configuration management
........................

The community.docker collection offers modules to manage Docker Swarm configurations and secrets:

docker_config module
    The :ansplugin:`community.docker.docker_config module <community.docker.docker_config#module>` allows you to create and modify Docker Swarm configs.

docker_secret module
    The :ansplugin:`community.docker.docker_secret module <community.docker.docker_secret#module>` allows you to create and modify Docker Swarm secrets.
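For instance, a sketch of creating a secret (name and value are placeholders):

.. code-block:: yaml+jinja

   - name: Create a Swarm secret
     community.docker.docker_secret:
       name: db_password
       data: "{{ 'hunter2' | b64encode }}"
       data_is_b64: true
       state: present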

Swarm services
..............

Docker Swarm services can be created and updated with the :ansplugin:`community.docker.docker_swarm_service module <community.docker.docker_swarm_service#module>`, and information on them can be queried by the :ansplugin:`community.docker.docker_swarm_service_info module <community.docker.docker_swarm_service_info#module>`.
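A minimal service definition might look like this sketch (the service name and image are placeholders):

.. code-block:: yaml+jinja

   - name: Run two replicas of an nginx service
     community.docker.docker_swarm_service:
       name: web
       image: nginx:1.27
       replicas: 2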
@ -0,0 +1,3 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@ -0,0 +1,16 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

docker
urllib3
requests
paramiko
pyyaml

# We assume that EEs are not based on Windows, and have Python >= 3.5.
# (ansible-builder does not support conditionals, it will simply add
# the following unconditionally to the requirements)
#
# pywin32 ; sys_platform == 'win32'
# backports.ssl-match-hostname ; python_version < '3.5'
@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 1
dependencies:
  python: meta/ee-requirements.txt
  system: meta/ee-bindep.txt
@ -0,0 +1,51 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

requires_ansible: '>=2.15.0'
action_groups:
  docker:
    - docker_compose_v2
    - docker_compose_v2_exec
    - docker_compose_v2_pull
    - docker_compose_v2_run
    - docker_config
    - docker_container
    - docker_container_copy_into
    - docker_container_exec
    - docker_container_info
    - docker_host_info
    - docker_image
    - docker_image_build
    - docker_image_export
    - docker_image_info
    - docker_image_load
    - docker_image_pull
    - docker_image_push
    - docker_image_remove
    - docker_image_tag
    - docker_login
    - docker_network
    - docker_network_info
    - docker_node
    - docker_node_info
    - docker_plugin
    - docker_prune
    - docker_secret
    - docker_stack
    - docker_stack_info
    - docker_stack_task_info
    - docker_swarm
    - docker_swarm_info
    - docker_swarm_service
    - docker_swarm_service_info
    - docker_volume
    - docker_volume_info

plugin_routing:
  modules:
    docker_compose:
      tombstone:
        removal_version: 4.0.0
        warning_text: This module uses docker-compose v1, which is End of Life since July 2022. Please migrate to community.docker.docker_compose_v2.
@ -0,0 +1,40 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import base64

from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash

from ansible_collections.community.docker.plugins.module_utils._scramble import unscramble


class ActionModule(ActionBase):
    # Set to True when transferring files to the remote
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        self._task.args['_max_file_size_for_diff'] = C.MAX_FILE_SIZE_FOR_DIFF

        result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))

        if u'diff' in result and result[u'diff'].get(u'scrambled_diff'):
            # Scrambling is not done for security, but to avoid no_log screwing up the diff
            diff = result[u'diff']
            key = base64.b64decode(diff.pop(u'scrambled_diff'))
            for k in (u'before', u'after'):
                if k in diff:
                    diff[k] = unscramble(diff[k], key)

        return result
@ -0,0 +1,510 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r"""
author:
  - Lorin Hochstein (!UNKNOWN)
  - Leendert Brouwer (!UNKNOWN)
name: docker
short_description: Run tasks in docker containers
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the Docker CLI to execute commands in the container. If you prefer to directly connect to the Docker daemon, use
    the P(community.docker.docker_api#connection) connection plugin.
options:
  remote_addr:
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  remote_user:
    description:
      - The user to execute as inside the container.
      - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  docker_extra_args:
    description:
      - Extra arguments to pass to the docker command line.
    default: ''
    vars:
      - name: ansible_docker_extra_args
    ini:
      - section: docker_connection
        key: extra_cli_args
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait to access reading output from the container once execution started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker CLI version 18.06 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows commands to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""

import fcntl
import os
import os.path
import subprocess
import re

from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.six import string_types
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils.selectors import selectors
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

display = Display()


class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.docker.docker'
    has_pipelining = True

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group). Therefore we do not check that the user is root when using
        # this connection. But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        self._docker_args = []
        self._container_user_cache = {}
        self._version = None

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        if 'docker_command' in kwargs:
            self.docker_cmd = kwargs['docker_command']
        else:
            try:
                self.docker_cmd = get_bin_path('docker')
            except ValueError:
                raise AnsibleError("docker command not found in PATH")

    @staticmethod
    def _sanitize_version(version):
        version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
        version = re.sub(u'^v', u'', version)
        return version

    def _old_docker_version(self):
        cmd_args = self._docker_args

        old_version_subcommand = ['version']

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()

        return old_docker_cmd, to_native(cmd_output), err, p.returncode

    def _new_docker_version(self):
        # no result yet, must be newer Docker version
        cmd_args = self._docker_args

        new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output, err = p.communicate()
        return new_docker_cmd, to_native(cmd_output), err, p.returncode

    def _get_docker_version(self):

        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
                if line.startswith(u'Server version:'):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))

        return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))

    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        container = self.get_option('remote_addr')
        if container in self._container_user_cache:
            return self._container_user_cache[container]
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()
        out = to_text(out, errors='surrogate_or_strict')

        if p.returncode != 0:
            display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
            self._container_user_cache[container] = None
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        user = out.strip() or u'root'
        self._container_user_cache[container] = user
        return user

    def _build_exec_cmd(self, cmd):
        """ Build the local docker exec command to run cmd on remote_host

        If remote_user is available and is supported by the docker
        version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._docker_args:
            local_cmd += self._docker_args

        local_cmd += [b'exec']

        if self.remote_user is not None:
            local_cmd += [b'-u', self.remote_user]

        if self.get_option('extra_env'):
            for k, v in self.get_option('extra_env').items():
                for val, what in ((k, 'Key'), (v, 'Value')):
                    if not isinstance(val, string_types):
                        raise AnsibleConnectionFailure(
                            'Non-string {0} found for extra_env option. Ambiguous env options must be '
                            'wrapped in quotes to avoid them being interpreted. {1}: {2!r}'
                            .format(what.lower(), what, val)
                        )
                local_cmd += [b'-e', b'%s=%s' % (to_bytes(k, errors='surrogate_or_strict'), to_bytes(v, errors='surrogate_or_strict'))]

        if self.get_option('working_dir') is not None:
            local_cmd += [b'-w', to_bytes(self.get_option('working_dir'), errors='surrogate_or_strict')]
            if self.docker_version != u'dev' and LooseVersion(self.docker_version) < LooseVersion(u'18.06'):
                # https://github.com/docker/cli/pull/732, first appeared in release 18.06.0
                raise AnsibleConnectionFailure(
                    'Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {0}.'
                    .format(self.docker_version)
                )

        if self.get_option('privileged'):
            local_cmd += [b'--privileged']

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b'-i', self.get_option('remote_addr')] + cmd

        return local_cmd

    def _set_docker_args(self):
        # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
        # docker arguments
        del self._docker_args[:]
        extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
        if extra_args:
            self._docker_args += extra_args.split(' ')

    def _set_conn_data(self):

        ''' initialize for the connection, cannot do only in init since all data is not ready at that point '''

        self._set_docker_args()

        self.remote_user = self.get_option('remote_user')
        if self.remote_user is None and self._play_context.remote_user is not None:
            self.remote_user = self._play_context.remote_user

        # timeout, use unless default and pc is different, backwards compat
        self.timeout = self.get_option('container_timeout')
        if self.timeout == 10 and self.timeout != self._play_context.timeout:
            self.timeout = self._play_context.timeout

    @property
    def docker_version(self):

        if not self._version:
            self._set_docker_args()

            self._version = self._get_docker_version()
            if self._version == u'dev':
                display.warning(u'Docker version number is "dev". Will assume latest version.')
            if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
                raise AnsibleError('docker connection type requires docker 1.3 or higher')
        return self._version

    def _get_actual_user(self):
        if self.remote_user is not None:
            # An explicit user is provided
            if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
                # Support for specifying the exec user was added in docker 1.7
                return self.remote_user
            else:
                self.remote_user = None
                actual_user = self._get_docker_remote_user()
                if actual_user != self.get_option('remote_user'):
                    display.warning(u'docker {0} does not support remote_user, using container default: {1}'
                                    .format(self.docker_version, actual_user or u'?'))
                return actual_user
        elif self._display.verbosity > 2:
            # Since we are not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we do not need to.
            return self._get_docker_remote_user()
        else:
            return None

def _connect(self, port=None):
|
||||
""" Connect to the container. Nothing to do """
|
||||
super(Connection, self)._connect()
|
||||
        if not self._connected:
            self._set_conn_data()
            actual_user = self._get_actual_user()
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                actual_user or u'?'), host=self.get_option('remote_addr')
            )
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """

        self._set_conn_data()

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])

        display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        p = subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        elif key.fileobj == p.stderr:
                            chunk = p.stderr.read()

                    if not chunk:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                    become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we are not guaranteed that a home dir will
            exist in any given chroot. So for now we are choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it is a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        self._set_conn_data()
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound(
                "file or module does not exist: %s" % to_native(in_path))

        out_path = shlex_quote(out_path)
        # Older docker does not have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = ' count=0'
            else:
                count = ''
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            try:
                p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise AnsibleError("docker connection requires dd command in the container to put files")
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
                                   (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        self._set_conn_data()
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
        args = [to_bytes(i, errors='surrogate_or_strict') for i in args]

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker does not have native support for fetching files with
            # the `cp` command. If `cp` fails, try to use `dd` instead.
            args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
            args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
            with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                try:
                    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                                         stdout=out_file, stderr=subprocess.PIPE)
                except OSError:
                    raise AnsibleError("docker connection requires dd command in the container to fetch files")
                stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False

    def reset(self):
        # Clear container user cache
        self._container_user_cache = {}
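To show how this CLI-based connection plugin is typically consumed, here is a minimal inventory sketch; the container name `web01` is a placeholder, and since `put_file` above falls back to running `dd` inside the container, the image must provide it:

```yaml
# Example inventory: manage a running container named "web01" through the
# Docker CLI connection plugin above.
all:
  hosts:
    web01:
      ansible_connection: community.docker.docker
      ansible_user: root  # user to execute as inside the container
```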
@@ -0,0 +1,399 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r"""
author:
  - Felix Fontein (@felixfontein)
name: docker_api
short_description: Run tasks in docker containers
version_added: 1.1.0
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the L(requests library,https://pypi.org/project/requests/) to interact directly with the Docker daemon instead of
    using the Docker CLI. Use the P(community.docker.docker#connection) connection plugin if you want to use the Docker CLI.
notes:
  - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection
    with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
extends_documentation_fragment:
  - community.docker.docker.api_documentation
  - community.docker.docker.var_names
options:
  remote_user:
    type: str
    description:
      - The user to execute as inside the container.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  remote_addr:
    type: str
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait for output from the container once execution has started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker API version 1.35 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows commands to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""

import os
import os.path

from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import string_types
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils.common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.copy import (
    DockerFileCopyError,
    DockerFileNotFound,
    fetch_file,
    put_file,
)

from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
    DockerSocketHandler,
)
from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
    AnsibleDockerClient,
)

from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

MIN_DOCKER_API = None


display = Display()


class Connection(ConnectionBase):
    ''' Local docker based connections '''

    transport = 'community.docker.docker_api'
    has_pipelining = True

    def _call_client(self, callable, not_found_can_be_resource=False):
        try:
            return callable()
        except NotFound as e:
            if not_found_can_be_resource:
                raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
            else:
                raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
        except APIError as e:
            if e.response is not None and e.response.status_code == 409:
                raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
            self.client.fail(
                'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
            )
        except DockerException as e:
            self.client.fail(
                'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
            )
        except RequestException as e:
            self.client.fail(
                'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'
                .format(e, self.get_option('remote_addr'))
            )

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self.client = None
        self.ids = dict()

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = ('.ps1', '.exe', '')

        self.actual_user = None

    def _connect(self, port=None):
        """ Connect to the container. Nothing to do """
        super(Connection, self)._connect()
        if not self._connected:
            self.actual_user = self.get_option('remote_user')
            display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
                self.actual_user or u'?'), host=self.get_option('remote_addr')
            )
            if self.client is None:
                self.client = AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
            self._connected = True

            if self.actual_user is None and display.verbosity > 2:
                # Since we are not setting the actual_user, look it up so we have it for logging later
                # Only do this if display verbosity is high enough that we'll need the value
                # This saves overhead from calling into docker when we do not need to
                display.vvv(u"Trying to determine actual user")
                result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
                if result.get('Config'):
                    self.actual_user = result['Config'].get('User')
                    if self.actual_user is not None:
                        display.vvv(u"Actual user is '{0}'".format(self.actual_user))

    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ Run a command on the docker host """

        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        command = [self._play_context.executable, '-c', to_text(cmd)]

        do_become = self.become and self.become.expect_prompt() and sudoable

        display.vvv(
            u"EXEC {0}{1}{2}".format(
                to_text(command),
                ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
                ', with become prompt' if do_become else '',
            ),
            host=self.get_option('remote_addr')
        )

        need_stdin = True if (in_data is not None) or do_become else False

        data = {
            'Container': self.get_option('remote_addr'),
            'User': self.get_option('remote_user') or '',
            'Privileged': self.get_option('privileged'),
            'Tty': False,
            'AttachStdin': need_stdin,
            'AttachStdout': True,
            'AttachStderr': True,
            'Cmd': command,
        }

        if 'detachKeys' in self.client._general_configs:
            data['detachKeys'] = self.client._general_configs['detachKeys']

        if self.get_option('extra_env'):
            data['Env'] = []
            for k, v in self.get_option('extra_env').items():
                for val, what in ((k, 'Key'), (v, 'Value')):
                    if not isinstance(val, string_types):
                        raise AnsibleConnectionFailure(
                            'Non-string {0} found for extra_env option. Ambiguous env options must be '
                            'wrapped in quotes to avoid them being interpreted. {1}: {2!r}'
                            .format(what.lower(), what, val)
                        )
                data['Env'].append(u'{0}={1}'.format(to_text(k, errors='surrogate_or_strict'), to_text(v, errors='surrogate_or_strict')))

        if self.get_option('working_dir') is not None:
            data['WorkingDir'] = self.get_option('working_dir')
            if self.client.docker_api_version < LooseVersion('1.35'):
                raise AnsibleConnectionFailure(
                    'Providing the working directory requires Docker API version 1.35 or newer.'
                    ' The Docker daemon the connection is using has API version {0}.'
                    .format(self.client.docker_api_version_str)
                )

        exec_data = self._call_client(lambda: self.client.post_json_to_json('/containers/{0}/exec', self.get_option('remote_addr'), data=data))
        exec_id = exec_data['Id']

        data = {
            'Tty': False,
            'Detach': False
        }
        if need_stdin:
            exec_socket = self._call_client(lambda: self.client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data))
            try:
                with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
                    if do_become:
                        become_output = [b'']

                        def append_become_output(stream_id, data):
                            become_output[0] += data

                        exec_socket_handler.set_block_done_callback(append_become_output)

                        while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
                            if not exec_socket_handler.select(self.get_option('container_timeout')):
                                stdout, stderr = exec_socket_handler.consume()
                                raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))

                            if exec_socket_handler.is_eof():
                                raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))

                        if not self.become.check_success(become_output[0]):
                            become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                            exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')

                    if in_data is not None:
                        exec_socket_handler.write(in_data)

                    stdout, stderr = exec_socket_handler.consume()
            finally:
                exec_socket.close()
        else:
            stdout, stderr = self._call_client(lambda: self.client.post_json_to_stream(
                '/exec/{0}/start', exec_id, stream=False, demux=True, tty=False, data=data))

        result = self._call_client(lambda: self.client.get_json('/exec/{0}/json', exec_id))

        return result.get('ExitCode') or 0, stdout or b'', stderr or b''

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we are not guaranteed that a home dir will
            exist in any given chroot. So for now we are choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it is a problem
        '''
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath
            return ntpath.normpath(remote_path)
        else:
            if not remote_path.startswith(os.path.sep):
                remote_path = os.path.join(os.path.sep, remote_path)
            return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        """ Transfer a file from local to docker container """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        out_path = self._prefix_login_path(out_path)

        if self.actual_user not in self.ids:
            dummy, ids, dummy = self.exec_command(b'id -u && id -g')
            try:
                user_id, group_id = ids.splitlines()
                self.ids[self.actual_user] = int(user_id), int(group_id)
                display.vvvv(
                    'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
                    host=self.get_option('remote_addr')
                )
            except Exception as e:
                raise AnsibleConnectionFailure(
                    'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
                    .format(e, self.get_option('remote_addr'), ids)
                )

        user_id, group_id = self.ids[self.actual_user]
        try:
            self._call_client(
                lambda: put_file(
                    self.client,
                    container=self.get_option('remote_addr'),
                    in_path=in_path,
                    out_path=out_path,
                    user_id=user_id,
                    group_id=group_id,
                    user_name=self.actual_user,
                    follow_links=True,
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_native(exc))
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_native(exc))

    def fetch_file(self, in_path, out_path):
        """ Fetch a file from container to local. """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))

        in_path = self._prefix_login_path(in_path)

        try:
            self._call_client(
                lambda: fetch_file(
                    self.client,
                    container=self.get_option('remote_addr'),
                    in_path=in_path,
                    out_path=out_path,
                    follow_links=True,
                    log=lambda msg: display.vvvv(msg, host=self.get_option('remote_addr')),
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_native(exc))
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_native(exc))

    def close(self):
        """ Terminate the connection. Nothing to do for Docker"""
        super(Connection, self).close()
        self._connected = False

    def reset(self):
        self.ids.clear()
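As a usage sketch for the options documented above, an inventory can drive this plugin entirely through its Ansible variables; the container name `db01` and all values are examples:

```yaml
# Example inventory exercising the docker_api connection options.
all:
  hosts:
    db01:
      ansible_connection: community.docker.docker_api
      ansible_docker_timeout: 30        # container_timeout
      ansible_docker_working_dir: /srv  # requires Docker API 1.35 or later
      ansible_docker_extra_env:         # keys and values must be strings
        APP_ENV: staging
```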
@@ -0,0 +1,238 @@
# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
# Based on Ansible local connection plugin by:
# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r"""
name: nsenter
short_description: execute on host running controller container
version_added: 1.9.0
description:
  - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container host instead
    of in the container itself.
  - This is useful for running Ansible in a pull model, while still keeping the Ansible control node containerized.
  - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the namespaces
    of the provided PID (default PID 1, or init/systemd).
author: Jeff Goldschrafe (@jgoldschrafe)
options:
  nsenter_pid:
    description:
      - PID to attach with using nsenter.
      - The default should be fine unless you are attaching as a non-root user.
    type: int
    default: 1
    vars:
      - name: ansible_nsenter_pid
    env:
      - name: ANSIBLE_NSENTER_PID
    ini:
      - section: nsenter_connection
        key: nsenter_pid
notes:
  - The remote user is ignored; this plugin always runs as root.
  - "This plugin requires the Ansible controller container to be launched in the following way: (1) The container image contains
    the C(nsenter) program; (2) The container is launched in privileged mode; (3) The container is launched in the host's
    PID namespace (C(--pid host))."
"""

import os
import pty
import subprocess
import fcntl

import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath

from ansible_collections.community.docker.plugins.module_utils.selectors import selectors


display = Display()


class Connection(ConnectionBase):
    '''Connections to a container host using nsenter
    '''

    transport = 'community.docker.nsenter'
    has_pipelining = False

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        self.cwd = None

    def _connect(self):
        self._nsenter_pid = self.get_option("nsenter_pid")

        # Because nsenter requires very high privileges, our remote user
        # is always assumed to be root.
        self._play_context.remote_user = "root"

        if not self._connected:
            display.vvv(
                u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
                    self._play_context.remote_user
                ),
                host=self._play_context.remote_addr,
            )
            self._connected = True
        return self

    def exec_command(self, cmd, in_data=None, sudoable=True):
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        display.debug("in nsenter.exec_command()")

        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

        if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
            raise AnsibleError("failed to find the specified executable %s."
                               " Please verify that the executable exists and re-try." % executable)

        # Rewrite the provided command to prefix it with nsenter
        nsenter_cmd_parts = [
            "nsenter",
            "--ipc",
            "--mount",
            "--net",
            "--pid",
            "--uts",
            "--preserve-credentials",
            "--target={0}".format(self._nsenter_pid),
            "--",
        ]

        if isinstance(cmd, (text_type, binary_type)):
            cmd_parts = nsenter_cmd_parts + [cmd]
            cmd = to_bytes(" ".join(cmd_parts))
        else:
            cmd_parts = nsenter_cmd_parts + cmd
            cmd = [to_bytes(arg) for arg in cmd_parts]

        display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        master = None
        stdin = subprocess.PIPE

        # This plugin does not support pipelining. This diverges from the behavior of
        # the core "local" connection plugin that this one derives from.
        if sudoable and self.become and self.become.expect_prompt():
            # Create a pty if sudoable for privilege escalation that needs it.
            # Falls back to using a standard pipe if this fails, which may
            # cause the command to fail in certain situations where we are escalating
            # privileges or the command otherwise needs a pty.
            try:
                master, stdin = pty.openpty()
            except (IOError, OSError) as e:
                display.debug("Unable to open pty: %s" % to_native(e))

        p = subprocess.Popen(
            cmd,
            shell=isinstance(cmd, (text_type, binary_type)),
            executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
            cwd=self.cwd,
            stdin=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # if we created a master, we can close the other half of the pty now, otherwise master is stdin
        if master is not None:
            os.close(stdin)

        display.debug("done running command with Popen()")

        if self.become and self.become.expect_prompt() and sudoable:
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            selector = selectors.DefaultSelector()
            selector.register(p.stdout, selectors.EVENT_READ)
            selector.register(p.stderr, selectors.EVENT_READ)

            become_output = b''
            try:
                while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
                    events = selector.select(self._play_context.timeout)
                    if not events:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))

                    for key, event in events:
                        if key.fileobj == p.stdout:
                            chunk = p.stdout.read()
                        elif key.fileobj == p.stderr:
                            chunk = p.stderr.read()

                    if not chunk:
                        stdout, stderr = p.communicate()
                        raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
                    become_output += chunk
            finally:
                selector.close()

            if not self.become.check_success(become_output):
                become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
                if master is None:
                    p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
                else:
                    os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')

            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        display.debug("getting output with communicate()")
        stdout, stderr = p.communicate(in_data)
        display.debug("done communicating")

        # finally, close the other half of the pty, if it was created
        if master:
            os.close(master)

        display.debug("done with nsenter.exec_command()")
        return (p.returncode, stdout, stderr)

    def put_file(self, in_path, out_path):
        super(Connection, self).put_file(in_path, out_path)

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
        try:
            with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
                in_data = in_file.read()
                rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
                if rc != 0:
                    raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
        except IOError as e:
            raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))

    def fetch_file(self, in_path, out_path):
        super(Connection, self).fetch_file(in_path, out_path)

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        try:
            rc, out, err = self.exec_command(cmd=["cat", in_path])
            display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
            if rc != 0:
raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
|
||||
            with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
                out_file.write(out)
        except IOError as e:
            raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        self._connected = False
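Tying the notes above together, a sketch of a working setup; the controller image name is a placeholder:

```yaml
# Example inventory for the nsenter connection plugin. The controller
# container must be started along the lines of:
#   docker run --privileged --pid host my-ansible-controller
# so that nsenter can enter the namespaces of the host's PID 1.
all:
  hosts:
    container_host:
      ansible_connection: community.docker.nsenter
      ansible_nsenter_pid: 1  # default; init/systemd on the host
```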
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard documentation fragment
    DOCUMENTATION = r"""
options: {}
attributes:
  check_mode:
    description: Can run in C(check_mode) and return changed status prediction without modifying target.
  diff_mode:
    description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
  idempotent:
    description:
      - When run twice in a row outside check mode, with the same arguments, the second invocation indicates no change.
      - This assumes that the system controlled/queried by the module has not changed in a relevant way.
"""

    # Should be used together with the standard fragment
    IDEMPOTENT_NOT_MODIFY_STATE = r"""
options: {}
attributes:
  idempotent:
    support: full
    details:
      - This action does not modify state.
"""

    # Should be used together with the standard fragment
    INFO_MODULE = r'''
options: {}
attributes:
    check_mode:
        support: full
        details:
            - This action does not modify state.
    diff_mode:
        support: N/A
        details:
            - This action does not modify state.
'''

    ACTIONGROUP_DOCKER = r'''
options: {}
attributes:
    action_group:
        description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
        support: full
        membership:
            - community.docker.docker
            - docker
'''

    CONN = r"""
options: {}
attributes:
  become:
    description: Is usable alongside C(become) keywords.
  connection:
    description: Uses the target's configured connection information to execute code on it.
  delegation:
    description: Can be used in conjunction with C(delegate_to) and related keywords.
"""

    FACTS = r"""
options: {}
attributes:
  facts:
    description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
"""

    # Should be used together with the standard fragment and the FACTS fragment
    FACTS_MODULE = r'''
options: {}
attributes:
    check_mode:
        support: full
        details:
            - This action does not modify state.
    diff_mode:
        support: N/A
        details:
            - This action does not modify state.
    facts:
        support: full
'''

    FILES = r"""
options: {}
attributes:
  safe_file_operations:
    description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
"""

    FLOW = r"""
options: {}
attributes:
  action:
    description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
  async:
    description: Supports being used with the C(async) keyword.
"""
@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  project_src:
    description:
      - Path to a directory containing a Compose file (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - If O(files) is provided, will look for these files in this directory instead.
      - Mutually exclusive with O(definition). One of O(project_src) and O(definition) must be provided.
    type: path
  project_name:
    description:
      - Provide a project name. If not provided, the project name is taken from the basename of O(project_src).
      - Required when O(definition) is provided.
    type: str
  files:
    description:
      - List of Compose file names relative to O(project_src) to be used instead of the main Compose file (C(compose.yml),
        C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - Files are loaded and merged in the order given.
      - Mutually exclusive with O(definition).
    type: list
    elements: path
    version_added: 3.7.0
  definition:
    description:
      - Compose file describing one or more services, networks and volumes.
      - Mutually exclusive with O(project_src) and O(files). One of O(project_src) and O(definition) must be provided.
      - If provided, PyYAML must be available to this module, and O(project_name) must be specified.
      - Note that a temporary directory will be created and deleted afterwards when using this option.
    type: dict
    version_added: 3.9.0
  env_files:
    description:
      - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory.
      - O(env_files) can be used to specify the path of one or multiple custom environment files instead.
      - The path is relative to the O(project_src) directory.
    type: list
    elements: path
  profiles:
    description:
      - List of profiles to enable when starting services.
      - Equivalent to C(docker compose --profile).
    type: list
    elements: str
  check_files_existing:
    description:
      - If set to V(false), the module will not check whether one of the files C(compose.yaml), C(compose.yml), C(docker-compose.yaml),
        or C(docker-compose.yml) exists in O(project_src) if O(files) is not provided.
      - This can be useful if environment files with C(COMPOSE_FILE) are used to configure a different filename. The module
        currently does not check for C(COMPOSE_FILE) in environment files or the current environment.
    type: bool
    default: true
    version_added: 3.9.0
requirements:
  - "PyYAML if O(definition) is used"
notes:
  - |-
    The Docker compose CLI plugin has no stable output format (see for example U(https://github.com/docker/compose/issues/10872)),
    and for the main operations also no machine-friendly output format. The module tries to accommodate this with various
    version-dependent behavior adjustments and with testing older and newer versions of the Docker compose CLI plugin.
    Currently the module is tested with multiple plugin versions between 2.18.1 and 2.23.3. The exact list of plugin versions
    will change over time. New releases of the Docker compose CLI plugin can break this module at any time.
"""

    # The following needs to be kept in sync with the compose_v2 module utils
    MINIMUM_VERSION = r'''
options: {}
requirements:
    - "Docker CLI with Docker compose plugin 2.18.0 or later"
'''
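A sketch of a task built on the shared Compose options documented above, using the `community.docker.docker_compose_v2` module that consumes this fragment; all paths and the profile name are placeholders:

```yaml
# Example task combining project_src, files, env_files and profiles.
- name: Bring up a Compose project
  community.docker.docker_compose_v2:
    project_src: /opt/myapp
    files:                   # merged in the order given
      - compose.yml
      - compose.override.yml
    env_files:               # relative to project_src
      - .env.staging
    profiles:
      - web
```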
@@ -0,0 +1,378 @@
# -*- coding: utf-8 -*-

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the TCP connection
        string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically
        replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases: [docker_url]
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead.
        If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
      - B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for Python
        7.0.0 or newer will lead to an error.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases: [docker_api_version]
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(ca.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases: [ca_cert, tls_ca_cert, cacert_path]
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(cert.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases: [tls_client_cert, cert_path]
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(key.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases: [tls_client_key, key_path]
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Note that
        if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If
        the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases: [tls_verify]
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. You can define
    E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_TLS), E(DOCKER_TLS_VERIFY)
    and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment.
    It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details.
  - When connecting to Docker daemon with TLS, you might need to install additional Python packages. For the Docker SDK for
    Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
  - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
    In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, and
    use C($DOCKER_CONFIG/config.json) otherwise.
"""

    # For plugins: allow to define common options with Ansible variables

    VAR_NAMES = r'''
options:
    docker_host:
        vars:
            - name: ansible_docker_docker_host
    tls_hostname:
        vars:
            - name: ansible_docker_tls_hostname
    api_version:
        vars:
            - name: ansible_docker_api_version
    timeout:
        vars:
            - name: ansible_docker_timeout
    ca_path:
        vars:
            - name: ansible_docker_ca_cert
            - name: ansible_docker_ca_path
              version_added: 3.6.0
    client_cert:
        vars:
            - name: ansible_docker_client_cert
    client_key:
        vars:
            - name: ansible_docker_client_key
    tls:
        vars:
            - name: ansible_docker_tls
    validate_certs:
        vars:
            - name: ansible_docker_validate_certs
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0

    DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
notes:
    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon.
requirements:
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details). Note that both
      modules should *not* be installed at the same time. Also note that when both modules are
      installed and one of them is uninstalled, the other might no longer function and a reinstall
      of it is required."
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.

    DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
notes:
    - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon.
requirements:
    - "Python >= 2.7"
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
      Python module has been superseded by L(docker,https://pypi.org/project/docker/)
      (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
      This module does *not* work with docker-py."
'''

    # Docker doc fragment when using the vendored API access code
    API_DOCUMENTATION = r'''
options:
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: str
        default: unix:///var/run/docker.sock
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
            - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
        type: str
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by this collection and the docker daemon.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: int
        default: 60
    ca_path:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
            - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has
              been added as an alias and can still be used.
        type: path
        aliases: [ ca_cert, tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: false
    use_ssh_client:
        description:
            - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
        type: bool
        default: false
        version_added: 1.5.0
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: false
        aliases: [ tls_verify ]
    debug:
        description:
            - Debug mode
        type: bool
        default: false

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
      E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docs.docker.com/machine/reference/env/) for more details.
    # - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
    #   In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified,
    #   and use C($DOCKER_CONFIG/config.json) otherwise.
    - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
      collection.
requirements:
    - requests
    - pywin32 (when using named pipes on Windows 32)
    - paramiko (when using SSH with O(use_ssh_client=false))
    - pyOpenSSL (when using TLS)
    - backports.ssl_match_hostname (when using TLS on Python 2)
'''

    # Docker doc fragment when using the Docker CLI
    CLI_DOCUMENTATION = r'''
options:
    docker_cli:
        description:
            - Path to the Docker CLI. If not provided, will search for Docker CLI on the E(PATH).
        type: path
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
            - Mutually exclusive with O(cli_context). If neither O(docker_host) nor O(cli_context) are provided, the
              value V(unix:///var/run/docker.sock) is used.
        type: str
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
        type: str
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by this collection and the docker daemon.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    ca_path:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ ca_cert, tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: false
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: false
        aliases: [ tls_verify ]
    # debug:
    #     description:
    #         - Debug mode
    #     type: bool
    #     default: false
    cli_context:
        description:
            - The Docker CLI context to use.
            - Mutually exclusive with O(docker_host).
        type: str

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
      E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docs.docker.com/machine/reference/env/) for more details.
    - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
      communicate with the Docker daemon. It directly calls the Docker CLI program.
'''
|
@ -0,0 +1,393 @@
|
||||
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
# For the parts taken from the docker inventory script:
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type


DOCUMENTATION = r"""
name: docker_containers
short_description: Ansible dynamic inventory plugin for Docker containers
version_added: 1.1.0
author:
  - Felix Fontein (@felixfontein)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.docker.docker.api_documentation
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker API.
  - Uses a YAML configuration file that ends with V(docker.(yml|yaml\)).
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker.yml) or V(docker.yaml). Other filenames will
    not be accepted.
options:
  plugin:
    description:
      - The name of this plugin. It should always be set to V(community.docker.docker_containers) for this plugin to recognize
        it as its own.
    type: str
    required: true
    choices: [community.docker.docker_containers]

  connection_type:
    description:
      - Which connection type to use to connect to the containers.
      - One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and O(private_ssh_port)
        are used. This requires that an SSH daemon is running inside the containers.
      - Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, and V(docker-api)
        (default) selects the P(community.docker.docker_api#connection) connection plugin.
      - When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin to the connection
        plugin. This can be controlled with O(configure_docker_daemon).
      - Note that the P(community.docker.docker_api#connection) connection plugin does B(not work with TCP TLS sockets)!
        See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
    type: str
    default: docker-api
    choices:
      - ssh
      - docker-cli
      - docker-api

  configure_docker_daemon:
    description:
      - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
      - Only used when O(connection_type=docker-api).
    type: bool
    default: true
    version_added: 1.8.0

  verbose_output:
    description:
      - Toggle to (not) include all available inspection metadata.
      - Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted
        to C(docker_hostconfig).
      - If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups).
      - The C(docker) inventory script always added these variables, so for compatibility set this to V(true).
    type: bool
    default: false

  default_ip:
    description:
      - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
      - Only used if O(connection_type) is V(ssh).
    type: str
    default: 127.0.0.1

  private_ssh_port:
    description:
      - The port containers use for SSH.
      - Only used if O(connection_type) is V(ssh).
    type: int
    default: 22

  add_legacy_groups:
    description:
      - 'Add the same groups as the C(docker) inventory script does. These are the following:'
      - 'C(<container id>): contains the container of this ID.'
      - 'C(<container name>): contains the container that has this name.'
      - 'C(<container short id>): contains the containers that have this short ID (first 13 letters of ID).'
      - 'C(image_<image name>): contains the containers that have the image C(<image name>).'
      - 'C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>).'
      - 'C(service_<service name>): contains the containers that belong to the service C(<service name>).'
      - 'C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this
        plugin against multiple Docker daemons.'
      - 'C(running): contains all containers that are running.'
      - 'C(stopped): contains all containers that are not running.'
      - If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how
        to do that.
    type: bool
    default: false

  filters:
    version_added: 3.5.0
"""

EXAMPLES = '''
---
# Minimal example using local Docker daemon
plugin: community.docker.docker_containers
docker_host: unix:///var/run/docker.sock

---
# Minimal example using remote Docker daemon
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375

---
# Example using remote Docker daemon with unverified TLS
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
tls: true

---
# Example using remote Docker daemon with verified TLS and client certificate verification
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

---
# Example using constructed features to create groups
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
  # Add containers with primary network foo to a network_foo group
  - prefix: network
    key: 'docker_hostconfig.NetworkMode'
  # Add Linux hosts to an os_linux group
  - prefix: os
    key: docker_platform

---
# Example using SSH connection with an explicit fallback for when port 22 has not been
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
plugin: community.docker.docker_containers
connection_type: ssh
compose:
  ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
  ansible_ssh_port: ansible_ssh_port | default(22, true)

---
# Only consider containers which have a label 'foo', or whose name starts with 'a'
plugin: community.docker.docker_containers
filters:
  # Accept all containers which have a label called 'foo'
  - include: >-
      "foo" in docker_config.Labels
  # Next accept all containers whose inventory_hostname starts with 'a'
  - include: >-
      inventory_hostname.startswith("a")
  # Exclude all containers that did not match any of the above filters
  - exclude: true
'''
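
# As an illustration (the file name here is hypothetical), a configuration like the
# ones above can be checked with:
#   ansible-inventory -i my.docker.yml --list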

import re

from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable

from ansible_collections.community.docker.plugins.module_utils.common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
    DOCKER_COMMON_ARGS_VARS,
)
from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
    AnsibleDockerClient,
)

from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException
from ansible_collections.community.docker.plugins.plugin_utils.unsafe import make_unsafe
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host

MIN_DOCKER_API = None


class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Docker daemon as source. '''

    NAME = 'community.docker.docker_containers'

    def _slugify(self, value):
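        # For example, the inspect key 'HostConfig' becomes the fact name
        # 'docker_hostconfig', and 'NetworkSettings' becomes 'docker_networksettings'.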
        return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))

    def _populate(self, client):
        strict = self.get_option('strict')

        ssh_port = self.get_option('private_ssh_port')
        default_ip = self.get_option('default_ip')
        hostname = self.get_option('docker_host')
        verbose_output = self.get_option('verbose_output')
        connection_type = self.get_option('connection_type')
        add_legacy_groups = self.get_option('add_legacy_groups')

        try:
            params = {
                'limit': -1,
                'all': 1,
                'size': 0,
                'trunc_cmd': 0,
                'since': None,
                'before': None,
            }
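            # 'all': 1 asks the daemon to list stopped containers as well as running
            # ones; those feed the 'running'/'stopped' groups below when
            # add_legacy_groups is enabled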
            containers = client.get_json('/containers/json', params=params)
        except APIError as exc:
            raise AnsibleError("Error listing containers: %s" % to_native(exc))

        if add_legacy_groups:
            self.inventory.add_group('running')
            self.inventory.add_group('stopped')

        extra_facts = {}
        if self.get_option('configure_docker_daemon'):
            for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                value = self.get_option(option_name)
                if value is not None:
                    extra_facts[var_name] = value

        filters = parse_filters(self.get_option('filters'))
        for container in containers:
            id = container.get('Id')
            short_id = id[:13]

            try:
                name = container.get('Names', list())[0].lstrip('/')
                full_name = name
            except IndexError:
                name = short_id
                full_name = id

            facts = dict(
                docker_name=make_unsafe(name),
                docker_short_id=make_unsafe(short_id),
            )
            full_facts = dict()

            try:
                inspect = client.get_json('/containers/{0}/json', id)
            except APIError as exc:
                raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))

            state = inspect.get('State') or dict()
            config = inspect.get('Config') or dict()
            labels = config.get('Labels') or dict()

            running = state.get('Running')

            groups = []

            # Add container to groups
            image_name = config.get('Image')
            if image_name and add_legacy_groups:
                groups.append('image_{0}'.format(image_name))

            stack_name = labels.get('com.docker.stack.namespace')
            if stack_name:
                full_facts['docker_stack'] = stack_name
                if add_legacy_groups:
                    groups.append('stack_{0}'.format(stack_name))

            service_name = labels.get('com.docker.swarm.service.name')
            if service_name:
                full_facts['docker_service'] = service_name
                if add_legacy_groups:
                    groups.append('service_{0}'.format(service_name))

            ansible_connection = None
            if connection_type == 'ssh':
                # Figure out ssh IP and Port
                try:
                    # Look up the public-facing port NAT'ed to the SSH port.
                    network_settings = inspect.get('NetworkSettings') or {}
                    port_settings = network_settings.get('Ports') or {}
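                    # 'Ports' maps a container port such as '22/tcp' to a list of
                    # {'HostIp': ..., 'HostPort': ...} bindings; take the first one.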
                    port = port_settings.get('%d/tcp' % (ssh_port, ))[0]
                except (IndexError, AttributeError, TypeError):
                    port = dict()

                try:
                    ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
                except KeyError:
                    ip = ''

                facts.update(dict(
                    ansible_ssh_host=ip,
                    ansible_ssh_port=port.get('HostPort', 0),
                ))
            elif connection_type == 'docker-cli':
                facts.update(dict(
                    ansible_host=full_name,
                ))
                ansible_connection = 'community.docker.docker'
            elif connection_type == 'docker-api':
                facts.update(dict(
                    ansible_host=full_name,
                ))
                facts.update(extra_facts)
                ansible_connection = 'community.docker.docker_api'

            full_facts.update(facts)
            for key, value in inspect.items():
                fact_key = self._slugify(key)
                full_facts[fact_key] = value

            full_facts = make_unsafe(full_facts)

            if ansible_connection:
                for d in (facts, full_facts):
                    if 'ansible_connection' not in d:
                        d['ansible_connection'] = ansible_connection

            if not filter_host(self, name, full_facts, filters):
                continue

            if verbose_output:
                facts.update(full_facts)

            self.inventory.add_host(name)
            for group in groups:
                self.inventory.add_group(group)
                self.inventory.add_host(name, group=group)

            for key, value in facts.items():
                self.inventory.set_variable(name, key, value)

            # Use constructed if applicable
            # Composed variables
            self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)

            # We need to do this last since we also add a group called `name`.
            # When we do this before a set_variable() call, the variables are assigned
            # to the group, and not to the host.
            if add_legacy_groups:
                self.inventory.add_group(id)
                self.inventory.add_host(name, group=id)
                self.inventory.add_group(name)
                self.inventory.add_host(name, group=name)
                self.inventory.add_group(short_id)
                self.inventory.add_host(name, group=short_id)
                self.inventory.add_group(hostname)
                self.inventory.add_host(name, group=hostname)

                if running is True:
                    self.inventory.add_host(name, group='running')
                else:
                    self.inventory.add_host(name, group='stopped')

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker.yaml', 'docker.yml')))

    def _create_client(self):
        return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        client = self._create_client()
        try:
            self._populate(client)
        except DockerException as e:
            raise AnsibleError(
                'An unexpected Docker error occurred: {0}'.format(e)
            )
        except RequestException as e:
            raise AnsibleError(
                'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(e)
            )
@ -0,0 +1,295 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r"""
name: docker_machine
author: Ximon Eighteen (@ximon18)
short_description: Docker Machine inventory source
requirements:
  - L(Docker Machine,https://docs.docker.com/machine/)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Get inventory hosts from Docker Machine.
  - Uses a YAML configuration file that ends with V(docker_machine.(yml|yaml\)).
  - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
  - The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables.
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_machine.yml) or V(docker_machine.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: Token that ensures this is a source file for the C(docker_machine) plugin.
    required: true
    choices: ['docker_machine', 'community.docker.docker_machine']
  daemon_env:
    description:
      - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
      - With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. A warning
        will be issued for any skipped host if the choice is V(require).
      - With V(optional) and V(optional-silently), fetch them and do not skip hosts for which they cannot be fetched. A warning
        will be issued for hosts where they cannot be fetched if the choice is V(optional).
      - With V(skip), do not attempt to fetch the docker daemon connection environment variables.
      - If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables.
    type: str
    choices:
      - require
      - require-silently
      - optional
      - optional-silently
      - skip
    default: require
  running_required:
    description:
      - When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
    type: bool
    default: true
  verbose_output:
    description:
      - When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named
        C(docker_machine_node_attributes).
    type: bool
    default: true
  filters:
    version_added: 3.5.0
"""

EXAMPLES = '''
---
# Minimal example
plugin: community.docker.docker_machine

---
# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), for example:
#   $ docker-machine create --driver digitalocean ... mymachine
#   $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
#   {
#     ...
#     "digitalocean": {
#       "hosts": [
#         "mymachine"
#       ]
#     ...
#   }
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - separator: ''
    key: docker_machine_node_attributes.DriverName

---
# Example grouping hosts by Docker Machine tag
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - prefix: tag
    key: 'dm_tags'

---
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
plugin: community.docker.docker_machine
compose:
  ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
'''

from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.process import get_bin_path
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.plugin_utils.unsafe import make_unsafe
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host

import json
import re
import subprocess

display = Display()


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    ''' Host inventory parser for ansible using Docker machine as source. '''

    NAME = 'community.docker.docker_machine'

    DOCKER_MACHINE_PATH = None

    def _run_command(self, args):
        if not self.DOCKER_MACHINE_PATH:
            try:
                self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
            except ValueError as e:
                raise AnsibleError(to_native(e))

        command = [self.DOCKER_MACHINE_PATH]
        command.extend(args)
        display.debug('Executing command {0}'.format(command))
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
            raise e

        return to_text(result).strip()

    def _get_docker_daemon_variables(self, machine_name):
        '''
        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
        '''
        try:
            env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
        except subprocess.CalledProcessError:
            # This can happen when the machine is created but provisioning is incomplete
            return []

        # example output of docker-machine env --shell=sh:
        # export DOCKER_TLS_VERIFY="1"
        # export DOCKER_HOST="tcp://134.209.204.160:2376"
        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
        # export DOCKER_MACHINE_NAME="routinator"
        # # Run this command to configure your shell:
        # # eval $(docker-machine env --shell=bash routinator)

        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
        # with the same name and value but with a dm_ name prefix.
        vars = []
        for line in env_lines:
            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
            if match:
                env_var_name = match.group(1)
                env_var_value = match.group(2)
                vars.append((env_var_name, env_var_value))
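                # for example, the line 'export DOCKER_HOST="tcp://134.209.204.160:2376"'
                # shown above yields ('DOCKER_HOST', 'tcp://134.209.204.160:2376'),
                # later exposed as the host variable dm_DOCKER_HOST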

        return vars

    def _get_machine_names(self):
        # Filter out machines that are not in the Running state, as we probably
        # cannot perform any useful actions on them.
        ls_command = ['ls', '-q']
        if self.get_option('running_required'):
            ls_command.extend(['--filter', 'state=Running'])

        try:
            ls_lines = self._run_command(ls_command)
        except subprocess.CalledProcessError:
            return []

        return ls_lines.splitlines()

    def _inspect_docker_machine_host(self, node):
        try:
            inspect_lines = self._run_command(['inspect', node])
        except subprocess.CalledProcessError:
            return None

        return json.loads(inspect_lines)

    def _ip_addr_docker_machine_host(self, node):
        try:
            ip_addr = self._run_command(['ip', node])
        except subprocess.CalledProcessError:
            return None

        return ip_addr

    def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
        if not env_var_tuples:
            warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
            if daemon_env in ('require', 'require-silently'):
                if daemon_env == 'require':
                    display.warning('{0}: host will be skipped'.format(warning_prefix))
                return True
            else:  # 'optional', 'optional-silently'
                if daemon_env == 'optional':
                    display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
        return False

    def _populate(self):
        daemon_env = self.get_option('daemon_env')
        filters = parse_filters(self.get_option('filters'))
        try:
            for node in self._get_machine_names():
                node_attrs = self._inspect_docker_machine_host(node)
                if not node_attrs:
                    continue

                unsafe_node_attrs = make_unsafe(node_attrs)

                machine_name = unsafe_node_attrs['Driver']['MachineName']
                if not filter_host(self, machine_name, unsafe_node_attrs, filters):
                    continue

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == 'skip':
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Cloud Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
                if unsafe_node_attrs['Driver']['IPAddress']:
                    ip_addr = unsafe_node_attrs['Driver']['IPAddress']
                else:
                    ip_addr = self._ip_addr_docker_machine_host(node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(machine_name, 'ansible_host', make_unsafe(ip_addr))
                self.inventory.set_variable(machine_name, 'ansible_port', unsafe_node_attrs['Driver']['SSHPort'])
                self.inventory.set_variable(machine_name, 'ansible_user', unsafe_node_attrs['Driver']['SSHUser'])
                self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', unsafe_node_attrs['Driver']['SSHKeyPath'])

                # set variables based on Docker Machine tags
                tags = unsafe_node_attrs['Driver'].get('Tags') or ''
                self.inventory.set_variable(machine_name, 'dm_tags', make_unsafe(tags))

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), make_unsafe(kv[1]))

                if self.get_option('verbose_output'):
                    self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', unsafe_node_attrs)

                # Use constructed if applicable
                strict = self.get_option('strict')

                # Composed variables
                self._set_composite_vars(self.get_option('compose'), unsafe_node_attrs, machine_name, strict=strict)

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'), unsafe_node_attrs, machine_name, strict=strict)

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), unsafe_node_attrs, machine_name, strict=strict)

        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
                               to_native(e), orig_exc=e)

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker_machine.yaml', 'docker_machine.yml')))

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
@ -0,0 +1,276 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = r"""
name: docker_swarm
author:
  - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes
requirements:
  - python >= 2.7
  - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker swarm API.
  - Uses a YAML configuration file that ends with V(docker_swarm.(yml|yaml\)).
  - 'The plugin returns the following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; C(managers) -
    all manager nodes; C(leader) - the swarm leader node; C(nonleaders) - all nodes except the swarm leader.'
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_swarm.yml) or V(docker_swarm.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: The name of this plugin. It should always be set to V(community.docker.docker_swarm) for this plugin to recognize
      it as its own.
    type: str
    required: true
    choices: [docker_swarm, community.docker.docker_swarm]
  docker_host:
    description:
      - Socket of a Docker swarm manager node (C(tcp), C(unix)).
      - Use V(unix:///var/run/docker.sock) to connect through a local socket.
    type: str
    required: true
    aliases: [docker_url]
  verbose_output:
    description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS), C(EngineVersion)).
    type: bool
    default: true
  tls:
    description: Connect using TLS without verifying the authenticity of the Docker host server.
    type: bool
    default: false
  validate_certs:
    description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker host server.
    type: bool
    default: false
    aliases: [tls_verify]
  client_key:
    description: Path to the client's TLS key file.
    type: path
    aliases: [tls_client_key, key_path]
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases: [ca_cert, tls_ca_cert, cacert_path]
  client_cert:
    description: Path to the client's TLS certificate file.
    type: path
    aliases: [tls_client_cert, cert_path]
  tls_hostname:
    description: When verifying the authenticity of the Docker host server, provide the expected name of the server.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python.
    type: str
    aliases: [docker_api_version]
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
    aliases: [time_out]
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  include_host_uri:
    description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the swarm leader
      in format of V(tcp://172.16.0.1:2376). This value may be used without additional modification as value of option O(docker_host)
      in Docker Swarm modules when connecting through the API. The port always defaults to V(2376).
    type: bool
    default: false
  include_host_uri_port:
    description: Override the detected port number included in C(ansible_host_uri).
    type: int
  filters:
    version_added: 3.5.0
"""

EXAMPLES = '''
---
# Minimal example using local docker
plugin: community.docker.docker_swarm
docker_host: unix:///var/run/docker.sock

---
# Minimal example using remote docker
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375

---
# Example using remote docker with unverified TLS
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
tls: true

---
# Example using remote docker with verified TLS and client certificate verification
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

---
# Example using constructed features to create groups and set ansible_host
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
  # add for example x86_64 hosts to an arch_x86_64 group
  - prefix: arch
    key: 'Description.Platform.Architecture'
  # add for example linux hosts to an os_linux group
  - prefix: os
    key: 'Description.Platform.OS'
  # create a group per node label
  # for example a node labeled w/ "production" ends up in group "label_production"
  # hint: labels containing special characters will be converted to safe names
  - key: 'Spec.Labels'
    prefix: label
'''

from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address

from ansible_collections.community.docker.plugins.plugin_utils.unsafe import make_unsafe
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host

try:
    import docker
    HAS_DOCKER = True
except ImportError:
    HAS_DOCKER = False


class InventoryModule(BaseInventoryPlugin, Constructable):
    ''' Host inventory parser for ansible using Docker swarm as source. '''

    NAME = 'community.docker.docker_swarm'

    def _fail(self, msg):
        raise AnsibleError(msg)

    def _populate(self):
        raw_params = dict(
            docker_host=self.get_option('docker_host'),
            tls=self.get_option('tls'),
            tls_verify=self.get_option('validate_certs'),
            key_path=self.get_option('client_key'),
            cacert_path=self.get_option('ca_path'),
            cert_path=self.get_option('client_cert'),
            tls_hostname=self.get_option('tls_hostname'),
            api_version=self.get_option('api_version'),
            timeout=self.get_option('timeout'),
            use_ssh_client=self.get_option('use_ssh_client'),
            debug=None,
        )
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        self.client = docker.DockerClient(**connect_params)
        self.inventory.add_group('all')
        self.inventory.add_group('manager')
        self.inventory.add_group('worker')
        self.inventory.add_group('leader')
        self.inventory.add_group('nonleaders')

        filters = parse_filters(self.get_option('filters'))

        if self.get_option('include_host_uri'):
            if self.get_option('include_host_uri_port'):
                host_uri_port = str(self.get_option('include_host_uri_port'))
            elif self.get_option('tls') or self.get_option('validate_certs'):
                host_uri_port = '2376'
            else:
                host_uri_port = '2375'
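                # 2376 is the conventional TLS-secured Docker API port and 2375 the
                # conventional plaintext one, matching the V(2376) default noted in
                # the include_host_uri documentation above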

        try:
            self.nodes = self.client.nodes.list()
            for node in self.nodes:
                node_attrs = self.client.nodes.get(node.id).attrs
                unsafe_node_attrs = make_unsafe(node_attrs)
                if not filter_host(self, unsafe_node_attrs['ID'], unsafe_node_attrs, filters):
                    continue
                self.inventory.add_host(unsafe_node_attrs['ID'])
                self.inventory.add_host(unsafe_node_attrs['ID'], group=unsafe_node_attrs['Spec']['Role'])
                self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host',
                                            unsafe_node_attrs['Status']['Addr'])
                if self.get_option('include_host_uri'):
                    self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri',
                                                make_unsafe('tcp://' + unsafe_node_attrs['Status']['Addr'] + ':' + host_uri_port))
                if self.get_option('verbose_output'):
                    self.inventory.set_variable(unsafe_node_attrs['ID'], 'docker_swarm_node_attributes', unsafe_node_attrs)
                if 'ManagerStatus' in unsafe_node_attrs:
                    if unsafe_node_attrs['ManagerStatus'].get('Leader'):
                        # This is a workaround for a Docker bug where in some cases the leader IP is 0.0.0.0.
                        # See moby/moby#35437 for details.
                        swarm_leader_ip = parse_address(node_attrs['ManagerStatus']['Addr'])[0] or \
                            unsafe_node_attrs['Status']['Addr']
                        if self.get_option('include_host_uri'):
                            self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri',
                                                        make_unsafe('tcp://' + swarm_leader_ip + ':' + host_uri_port))
                        self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host', make_unsafe(swarm_leader_ip))
                        self.inventory.add_host(unsafe_node_attrs['ID'], group='leader')
                    else:
                        self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders')
                else:
                    self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders')
                # Use constructed if applicable
                strict = self.get_option('strict')
                # Composed variables
                self._set_composite_vars(self.get_option('compose'),
                                         unsafe_node_attrs,
                                         unsafe_node_attrs['ID'],
                                         strict=strict)
                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(self.get_option('groups'),
                                                  unsafe_node_attrs,
                                                  unsafe_node_attrs['ID'],
                                                  strict=strict)
                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
                                               unsafe_node_attrs,
                                               unsafe_node_attrs['ID'],
                                               strict=strict)
        except Exception as e:
            raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                               to_native(e))

    def verify_file(self, path):
        """Return the possibility of a file being consumable by this plugin."""
        return (
            super(InventoryModule, self).verify_file(path) and
            path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))

    def parse(self, inventory, loader, path, cache=True):
        if not HAS_DOCKER:
            raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
                               'https://github.com/docker/docker-py.')
        super(InventoryModule, self).parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import traceback

from ansible.module_utils.six import PY2


REQUESTS_IMPORT_ERROR = None
URLLIB3_IMPORT_ERROR = None
BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = None


try:
    from requests import Session  # noqa: F401, pylint: disable=unused-import
    from requests.adapters import HTTPAdapter  # noqa: F401, pylint: disable=unused-import
    from requests.exceptions import HTTPError, InvalidSchema  # noqa: F401, pylint: disable=unused-import
except ImportError:
    REQUESTS_IMPORT_ERROR = traceback.format_exc()

    class Session(object):
        __attrs__ = []

    class HTTPAdapter(object):
        __attrs__ = []

    class HTTPError(Exception):
        pass

    class InvalidSchema(Exception):
        pass


try:
    from requests.packages import urllib3
    from requests.packages.urllib3 import connection as urllib3_connection  # pylint: disable=unused-import
except ImportError:
    try:
        import urllib3
        from urllib3 import connection as urllib3_connection  # pylint: disable=unused-import
    except ImportError:
        URLLIB3_IMPORT_ERROR = traceback.format_exc()

        class _HTTPConnectionPool(object):
            pass

        class _HTTPConnection(object):
            pass
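
        # Stand-ins so the rest of the package can still import the usual urllib3
        # names when urllib3 is missing; fail_on_missing_imports() below raises the
        # real error once the transport is actually used.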
        class FakeURLLIB3(object):
            def __init__(self):
                self._collections = self
                self.poolmanager = self
                self.connection = self
                self.connectionpool = self

                self.RecentlyUsedContainer = object()
                self.PoolManager = object()
                self.match_hostname = object()
                self.HTTPConnectionPool = _HTTPConnectionPool

        class FakeURLLIB3Connection(object):
            def __init__(self):
                self.HTTPConnection = _HTTPConnection

        urllib3 = FakeURLLIB3()
        urllib3_connection = FakeURLLIB3Connection()


# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if PY2:
    try:
        from backports.ssl_match_hostname import match_hostname
        urllib3.connection.match_hostname = match_hostname
    except ImportError:
        BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = traceback.format_exc()


def fail_on_missing_imports():
    if REQUESTS_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install requests',
            'requests', REQUESTS_IMPORT_ERROR)
    if URLLIB3_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install urllib3',
            'urllib3', URLLIB3_IMPORT_ERROR)
    if BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException

        raise MissingRequirementException(
            'You have to install backports.ssl-match-hostname',
            'backports.ssl-match-hostname', BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR)
@ -0,0 +1,613 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import logging
import struct
from functools import partial

from ansible.module_utils.six import PY3, binary_type, iteritems, string_types, raise_from
from ansible.module_utils.six.moves.urllib.parse import quote

from .. import auth
from .._import_helper import fail_on_missing_imports
from .._import_helper import HTTPError as _HTTPError
from .._import_helper import InvalidSchema as _InvalidSchema
from .._import_helper import Session as _Session
from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
                         DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
                         DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
                         MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES,
                         DEFAULT_DATA_CHUNK_SIZE)
from ..errors import (DockerException, InvalidVersion, TLSParameterError, MissingRequirementException,
                      create_api_error_from_http_exception)
from ..tls import TLSConfig
from ..transport.npipeconn import NpipeHTTPAdapter
from ..transport.npipesocket import PYWIN32_IMPORT_ERROR
from ..transport.unixconn import UnixHTTPAdapter
from ..transport.sshconn import SSHHTTPAdapter, PARAMIKO_IMPORT_ERROR
from ..transport.ssladapter import SSLHTTPAdapter
from ..utils import config, utils, json_stream
from ..utils.decorators import check_resource, update_headers
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter

from .daemon import DaemonApiMixin


log = logging.getLogger(__name__)


class APIClient(
        _Session,
        DaemonApiMixin):
    """
    A low-level client for the Docker Engine API.

    Example:

        >>> import docker
        >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
        >>> client.version()
        {u'ApiVersion': u'1.33',
         u'Arch': u'amd64',
         u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
         u'GitCommit': u'f4ffd2511c',
         u'GoVersion': u'go1.9.2',
         u'KernelVersion': u'4.14.3-1-ARCH',
         u'MinAPIVersion': u'1.12',
         u'Os': u'linux',
         u'Version': u'17.10.0-ce'}

    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """

    __attrs__ = _Session.__attrs__ + ['_auth_configs',
                                      '_general_configs',
                                      '_version',
                                      'base_url',
                                      'timeout']

    def __init__(self, base_url=None, version=None,
                 timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
                 user_agent=DEFAULT_USER_AGENT, num_pools=None,
                 credstore_env=None, use_ssh_client=False,
                 max_pool_size=DEFAULT_MAX_POOL_SIZE):
        super(APIClient, self).__init__()

        fail_on_missing_imports()

        if tls and not base_url:
            raise TLSParameterError(
                'If using TLS, the base_url argument must be provided.'
            )

        self.base_url = base_url
        self.timeout = timeout
        self.headers['User-Agent'] = user_agent

        self._general_configs = config.load_general_config()

        proxy_config = self._general_configs.get('proxies', {})
        try:
            proxies = proxy_config[base_url]
        except KeyError:
            proxies = proxy_config.get('default', {})

        self._proxy_configs = ProxyConfig.from_dict(proxies)

        self._auth_configs = auth.load_config(
            config_dict=self._general_configs, credstore_env=credstore_env,
        )
        self.credstore_env = credstore_env

        base_url = utils.parse_host(
            base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
        )
        # SSH has a different default for num_pools to all other adapters
        num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
            base_url.startswith('ssh://') else DEFAULT_NUM_POOLS

        if base_url.startswith('http+unix://'):
            self._custom_adapter = UnixHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self._unmount('http://', 'https://')
            # host part of URL should be unused, but is resolved by requests
            # module in proxy_bypass_macosx_sysconf()
            self.base_url = 'http+docker://localhost'
        elif base_url.startswith('npipe://'):
            if not IS_WINDOWS_PLATFORM:
                raise DockerException(
                    'The npipe:// protocol is only supported on Windows'
                )
            if PYWIN32_IMPORT_ERROR is not None:
                raise MissingRequirementException(
                    'Install pypiwin32 package to enable npipe:// support',
                    'pywin32',
                    PYWIN32_IMPORT_ERROR)
            self._custom_adapter = NpipeHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localnpipe'
        elif base_url.startswith('ssh://'):
            if PARAMIKO_IMPORT_ERROR is not None and not use_ssh_client:
                raise MissingRequirementException(
                    'Install paramiko package to enable ssh:// support',
                    'paramiko',
                    PARAMIKO_IMPORT_ERROR)
            self._custom_adapter = SSHHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size, shell_out=use_ssh_client
            )
            self.mount('http+docker://ssh', self._custom_adapter)
            self._unmount('http://', 'https://')
            self.base_url = 'http+docker://ssh'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = SSLHTTPAdapter(
                    pool_connections=num_pools)
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None or (isinstance(version, string_types) and version.lower() == 'auto'):
            self._version = self._retrieve_server_version()
        else:
            self._version = version
        if not isinstance(self._version, string_types):
            raise DockerException(
                'Version parameter must be a string or None. Found {0}'.format(
                    type(version).__name__
                )
            )
        if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
            raise InvalidVersion(
                'API versions below {0} are no longer supported by this '
                'library.'.format(MINIMUM_DOCKER_API_VERSION)
            )

    def _retrieve_server_version(self):
        try:
            version_result = self.version(api_version=False)
        except Exception as e:
            raise DockerException(
                'Error while fetching server API version: {0}'.format(e)
            )

        try:
            return version_result["ApiVersion"]
        except KeyError:
            raise DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            )
        except Exception as e:
            raise DockerException(
                'Error while fetching server API version: {0}. Response seems to be broken.'.format(e)
            )

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    @update_headers
    def _post(self, url, **kwargs):
        return self.post(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _get(self, url, **kwargs):
        return self.get(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _head(self, url, **kwargs):
        return self.head(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _put(self, url, **kwargs):
        return self.put(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _delete(self, url, **kwargs):
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, pathfmt, *args, **kwargs):
        for arg in args:
            if not isinstance(arg, string_types):
                raise ValueError(
                    'Expected a string but found {0} ({1}) '
                    'instead'.format(arg, type(arg))
                )

        quote_f = partial(quote, safe="/:")
        args = map(quote_f, args)

        if kwargs.get('versioned_api', True):
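            # for example, pathfmt '/containers/{0}/json' with arg 'abcd' on API
            # version 1.41 would become '<base_url>/v1.41/containers/abcd/json'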
|
||||
return '{0}/v{1}{2}'.format(
|
||||
self.base_url, self._version, pathfmt.format(*args)
|
||||
)
|
||||
else:
|
||||
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
|
||||
|
||||
def _raise_for_status(self, response):
|
||||
"""Raises stored :class:`APIError`, if one occurred."""
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except _HTTPError as e:
|
||||
raise_from(create_api_error_from_http_exception(e), e)
|
||||
|
||||
def _result(self, response, json=False, binary=False):
|
||||
if json and binary:
|
||||
raise AssertionError('json and binary must not be both True')
|
||||
self._raise_for_status(response)
|
||||
|
||||
if json:
|
||||
return response.json()
|
||||
if binary:
|
||||
return response.content
|
||||
return response.text
|
||||
|
||||
def _post_json(self, url, data, **kwargs):
|
||||
# Go <1.1 cannot unserialize null to a string
|
||||
# so we do this disgusting thing here.
|
||||
data2 = {}
|
||||
if data is not None and isinstance(data, dict):
|
||||
for k, v in iteritems(data):
|
||||
if v is not None:
|
||||
data2[k] = v
|
||||
elif data is not None:
|
||||
data2 = data
|
||||
|
||||
if 'headers' not in kwargs:
|
||||
kwargs['headers'] = {}
|
||||
kwargs['headers']['Content-Type'] = 'application/json'
|
||||
return self._post(url, data=json.dumps(data2), **kwargs)
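# Sketch of the None-stripping above (illustrative values): top-level
# None entries are dropped before JSON encoding, so old Go-based daemons
# never receive a JSON null where they expect a string:
#
#   data  = {'Image': 'busybox', 'Cmd': None}
#   data2 = {'Image': 'busybox'}   # what actually gets posted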
|
||||
def _attach_params(self, override=None):
|
||||
return override or {
|
||||
'stdout': 1,
|
||||
'stderr': 1,
|
||||
'stream': 1
|
||||
}
|
||||
|
||||
def _get_raw_response_socket(self, response):
|
||||
self._raise_for_status(response)
|
||||
if self.base_url == "http+docker://localnpipe":
|
||||
sock = response.raw._fp.fp.raw.sock
|
||||
elif self.base_url.startswith('http+docker://ssh'):
|
||||
sock = response.raw._fp.fp.channel
|
||||
elif PY3:
|
||||
sock = response.raw._fp.fp.raw
|
||||
if self.base_url.startswith("https://"):
|
||||
sock = sock._sock
|
||||
else:
|
||||
sock = response.raw._fp.fp._sock
|
||||
try:
|
||||
# Keep a reference to the response to stop it being garbage
|
||||
# collected. If the response is garbage collected, it will
|
||||
# close TLS sockets.
|
||||
sock._response = response
|
||||
except AttributeError:
|
||||
# UNIX sockets cannot have attributes set on them, but that's
|
||||
# fine because we will not be doing TLS over them
|
||||
pass
|
||||
|
||||
return sock
|
||||
|
||||
def _stream_helper(self, response, decode=False):
|
||||
"""Generator for data coming from a chunked-encoded HTTP response."""
|
||||
|
||||
if response.raw._fp.chunked:
|
||||
if decode:
|
||||
for chunk in json_stream.json_stream(self._stream_helper(response, False)):
|
||||
yield chunk
|
||||
else:
|
||||
reader = response.raw
|
||||
while not reader.closed:
|
||||
# this read call will block until we get a chunk
|
||||
data = reader.read(1)
|
||||
if not data:
|
||||
break
|
||||
if reader._fp.chunk_left:
|
||||
data += reader.read(reader._fp.chunk_left)
|
||||
yield data
|
||||
else:
|
||||
# Response is not chunked, meaning we probably
|
||||
# encountered an error immediately
|
||||
yield self._result(response, json=decode)
|
||||
|
||||
def _multiplexed_buffer_helper(self, response):
|
||||
"""A generator of multiplexed data blocks read from a buffered
|
||||
response."""
|
||||
buf = self._result(response, binary=True)
|
||||
buf_length = len(buf)
|
||||
walker = 0
|
||||
while True:
|
||||
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
|
||||
break
|
||||
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
|
||||
dummy, length = struct.unpack_from('>BxxxL', header)
|
||||
start = walker + STREAM_HEADER_SIZE_BYTES
|
||||
end = start + length
|
||||
walker = end
|
||||
yield buf[start:end]
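# Frame layout consumed by the two multiplexed helpers (the documented
# Docker attach protocol; the example bytes are illustrative): each
# frame carries an 8-byte header that struct.unpack('>BxxxL', ...)
# splits into a stream byte (0=stdin, 1=stdout, 2=stderr), three padding
# bytes, and a big-endian uint32 payload length. For instance,
#   b'\x01\x00\x00\x00\x00\x00\x00\x05hello'
# is one frame: five bytes of stdout containing b'hello'.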
|
||||
def _multiplexed_response_stream_helper(self, response):
|
||||
"""A generator of multiplexed data blocks coming from a response
|
||||
stream."""
|
||||
|
||||
# Disable timeout on the underlying socket to prevent
|
||||
# Read timed out(s) for long running processes
|
||||
socket = self._get_raw_response_socket(response)
|
||||
self._disable_socket_timeout(socket)
|
||||
|
||||
while True:
|
||||
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
|
||||
if not header:
|
||||
break
|
||||
dummy, length = struct.unpack('>BxxxL', header)
|
||||
if not length:
|
||||
continue
|
||||
data = response.raw.read(length)
|
||||
if not data:
|
||||
break
|
||||
yield data
|
||||
|
||||
def _stream_raw_result(self, response, chunk_size=1, decode=True):
|
||||
''' Stream result for TTY-enabled container and raw binary data'''
|
||||
self._raise_for_status(response)
|
||||
|
||||
# Disable timeout on the underlying socket to prevent
|
||||
# Read timed out(s) for long running processes
|
||||
socket = self._get_raw_response_socket(response)
|
||||
self._disable_socket_timeout(socket)
|
||||
|
||||
for out in response.iter_content(chunk_size, decode):
|
||||
yield out
|
||||
|
||||
def _read_from_socket(self, response, stream, tty=True, demux=False):
|
||||
"""Consume all data from the socket, close the response and return the
|
||||
data. If stream=True, then a generator is returned instead and the
|
||||
caller is responsible for closing the response.
|
||||
"""
|
||||
socket = self._get_raw_response_socket(response)
|
||||
|
||||
gen = frames_iter(socket, tty)
|
||||
|
||||
if demux:
|
||||
# The generator will output tuples (stdout, stderr)
|
||||
gen = (demux_adaptor(*frame) for frame in gen)
|
||||
else:
|
||||
# The generator will output strings
|
||||
gen = (data for (dummy, data) in gen)
|
||||
|
||||
if stream:
|
||||
return gen
|
||||
else:
|
||||
try:
|
||||
# Wait for all the frames, concatenate them, and return the result
|
||||
return consume_socket_output(gen, demux=demux)
|
||||
finally:
|
||||
response.close()
|
||||
|
||||
def _disable_socket_timeout(self, socket):
|
||||
""" Depending on the combination of python version and whether we are
|
||||
connecting over http or https, we might need to access _sock, which
|
||||
may or may not exist; or we may need to just settimeout on socket
|
||||
itself, which also may or may not have settimeout on it. To avoid
|
||||
missing the correct one, we try both.
|
||||
|
||||
We also do not want to set the timeout if it is already disabled, as
|
||||
you run the risk of changing a socket that was non-blocking to
|
||||
blocking, for example when using gevent.
|
||||
"""
|
||||
sockets = [socket, getattr(socket, '_sock', None)]
|
||||
|
||||
for s in sockets:
|
||||
if not hasattr(s, 'settimeout'):
|
||||
continue
|
||||
|
||||
timeout = -1
|
||||
|
||||
if hasattr(s, 'gettimeout'):
|
||||
timeout = s.gettimeout()
|
||||
|
||||
# Do not change the timeout if it is already disabled.
|
||||
if timeout is None or timeout == 0.0:
|
||||
continue
|
||||
|
||||
s.settimeout(None)
|
||||
|
||||
@check_resource('container')
|
||||
def _check_is_tty(self, container):
|
||||
cont = self.inspect_container(container)
|
||||
return cont['Config']['Tty']
|
||||
|
||||
def _get_result(self, container, stream, res):
|
||||
return self._get_result_tty(stream, res, self._check_is_tty(container))
|
||||
|
||||
def _get_result_tty(self, stream, res, is_tty):
|
||||
# We should also use raw streaming (without keep-alive)
|
||||
# if we are dealing with a tty-enabled container.
|
||||
if is_tty:
|
||||
return self._stream_raw_result(res) if stream else \
|
||||
self._result(res, binary=True)
|
||||
|
||||
self._raise_for_status(res)
|
||||
sep = binary_type()
|
||||
if stream:
|
||||
return self._multiplexed_response_stream_helper(res)
|
||||
else:
|
||||
return sep.join(
|
||||
list(self._multiplexed_buffer_helper(res))
|
||||
)
|
||||
|
||||
def _unmount(self, *args):
|
||||
for proto in args:
|
||||
self.adapters.pop(proto)
|
||||
|
||||
def get_adapter(self, url):
|
||||
try:
|
||||
return super(APIClient, self).get_adapter(url)
|
||||
except _InvalidSchema as e:
|
||||
if self._custom_adapter:
|
||||
return self._custom_adapter
|
||||
else:
|
||||
raise e
|
||||
|
||||
@property
|
||||
def api_version(self):
|
||||
return self._version
|
||||
|
||||
def reload_config(self, dockercfg_path=None):
|
||||
"""
|
||||
Force a reload of the auth configuration
|
||||
|
||||
Args:
|
||||
dockercfg_path (str): Use a custom path for the Docker config file
|
||||
(default ``$HOME/.docker/config.json`` if present,
|
||||
otherwise ``$HOME/.dockercfg``)
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
self._auth_configs = auth.load_config(
|
||||
dockercfg_path, credstore_env=self.credstore_env
|
||||
)
|
||||
|
||||
def _set_auth_headers(self, headers):
|
||||
log.debug('Looking for auth config')
|
||||
|
||||
# If we do not have any auth data so far, try reloading the config
|
||||
# file one more time in case anything showed up in there.
|
||||
if not self._auth_configs or self._auth_configs.is_empty:
|
||||
log.debug("No auth config in memory - loading from filesystem")
|
||||
self._auth_configs = auth.load_config(
|
||||
credstore_env=self.credstore_env
|
||||
)
|
||||
|
||||
# Send the full auth configuration (if any exists), since the build
|
||||
# could use any (or all) of the registries.
|
||||
if self._auth_configs:
|
||||
auth_data = self._auth_configs.get_all_credentials()
|
||||
|
||||
# See https://github.com/docker/docker-py/issues/1683
|
||||
if (auth.INDEX_URL not in auth_data and
|
||||
auth.INDEX_NAME in auth_data):
|
||||
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
|
||||
|
||||
log.debug(
|
||||
'Sending auth config (%s)',
|
||||
', '.join(repr(k) for k in auth_data.keys())
|
||||
)
|
||||
|
||||
if auth_data:
|
||||
headers['X-Registry-Config'] = auth.encode_header(
|
||||
auth_data
|
||||
)
|
||||
else:
|
||||
log.debug('No auth config found')
|
||||
|
||||
def get_binary(self, pathfmt, *args, **kwargs):
|
||||
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), binary=True)
|
||||
|
||||
def get_json(self, pathfmt, *args, **kwargs):
|
||||
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
|
||||
|
||||
def get_text(self, pathfmt, *args, **kwargs):
|
||||
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs))
|
||||
|
||||
def get_raw_stream(self, pathfmt, *args, **kwargs):
|
||||
chunk_size = kwargs.pop('chunk_size', DEFAULT_DATA_CHUNK_SIZE)
|
||||
res = self._get(self._url(pathfmt, *args, versioned_api=True), stream=True, **kwargs)
|
||||
self._raise_for_status(res)
|
||||
return self._stream_raw_result(res, chunk_size, False)
|
||||
|
||||
def delete_call(self, pathfmt, *args, **kwargs):
|
||||
self._raise_for_status(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs))
|
||||
|
||||
def delete_json(self, pathfmt, *args, **kwargs):
|
||||
return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
|
||||
|
||||
def post_call(self, pathfmt, *args, **kwargs):
|
||||
self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs))
|
||||
|
||||
def post_json(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
|
||||
|
||||
def post_json_to_binary(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), binary=True)
|
||||
|
||||
def post_json_to_json(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), json=True)
|
||||
|
||||
def post_json_to_text(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
|
||||
def post_json_to_stream_socket(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
headers = (kwargs.pop('headers', None) or {}).copy()
|
||||
headers.update({
|
||||
'Connection': 'Upgrade',
|
||||
'Upgrade': 'tcp',
|
||||
})
|
||||
return self._get_raw_response_socket(
|
||||
self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs))
|
||||
|
||||
def post_json_to_stream(self, pathfmt, *args, **kwargs):
|
||||
data = kwargs.pop('data', None)
|
||||
headers = (kwargs.pop('headers', None) or {}).copy()
|
||||
headers.update({
|
||||
'Connection': 'Upgrade',
|
||||
'Upgrade': 'tcp',
|
||||
})
|
||||
stream = kwargs.pop('stream', False)
|
||||
demux = kwargs.pop('demux', False)
|
||||
tty = kwargs.pop('tty', False)
|
||||
return self._read_from_socket(
|
||||
self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs),
|
||||
stream,
|
||||
tty=tty,
|
||||
demux=demux
|
||||
)
|
||||
|
||||
def post_to_json(self, pathfmt, *args, **kwargs):
|
||||
return self._result(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
|
@ -0,0 +1,134 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from .. import auth
|
||||
from ..utils.decorators import minimum_version
|
||||
|
||||
|
||||
class DaemonApiMixin(object):
|
||||
@minimum_version('1.25')
|
||||
def df(self):
|
||||
"""
|
||||
Get data usage information.
|
||||
|
||||
Returns:
|
||||
(dict): A dictionary representing different resource categories
|
||||
and their respective data usage.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/system/df')
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
def info(self):
|
||||
"""
|
||||
Display system-wide information. Identical to the ``docker info``
|
||||
command.
|
||||
|
||||
Returns:
|
||||
(dict): The info as a dict
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(self._get(self._url("/info")), True)
|
||||
|
||||
def login(self, username, password=None, email=None, registry=None,
|
||||
reauth=False, dockercfg_path=None):
|
||||
"""
|
||||
Authenticate with a registry. Similar to the ``docker login`` command.
|
||||
|
||||
Args:
|
||||
username (str): The registry username
|
||||
password (str): The plaintext password
|
||||
email (str): The email for the registry account
|
||||
registry (str): URL to the registry. E.g.
|
||||
``https://index.docker.io/v1/``
|
||||
reauth (bool): Whether or not to refresh existing authentication on
|
||||
the Docker server.
|
||||
dockercfg_path (str): Use a custom path for the Docker config file
|
||||
(default ``$HOME/.docker/config.json`` if present,
|
||||
otherwise ``$HOME/.dockercfg``)
|
||||
|
||||
Returns:
|
||||
(dict): The response from the login request
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
# If we do not have any auth data so far, try reloading the config file
|
||||
# one more time in case anything showed up in there.
|
||||
# If dockercfg_path is passed check to see if the config file exists,
|
||||
# if so load that config.
|
||||
if dockercfg_path and os.path.exists(dockercfg_path):
|
||||
self._auth_configs = auth.load_config(
|
||||
dockercfg_path, credstore_env=self.credstore_env
|
||||
)
|
||||
elif not self._auth_configs or self._auth_configs.is_empty:
|
||||
self._auth_configs = auth.load_config(
|
||||
credstore_env=self.credstore_env
|
||||
)
|
||||
|
||||
authcfg = self._auth_configs.resolve_authconfig(registry)
|
||||
# If we found an existing auth config for this registry and username
|
||||
# combination, we can return it immediately unless reauth is requested.
|
||||
if authcfg and authcfg.get('username', None) == username \
|
||||
and not reauth:
|
||||
return authcfg
|
||||
|
||||
req_data = {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': email,
|
||||
'serveraddress': registry,
|
||||
}
|
||||
|
||||
response = self._post_json(self._url('/auth'), data=req_data)
|
||||
if response.status_code == 200:
|
||||
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
|
||||
return self._result(response, json=True)
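# Hedged usage sketch (names and values are illustrative, not from the
# original source); assumes `client` is a connected APIClient:
#
#   res = client.login('someuser', password='s3cret',
#                      registry='https://index.docker.io/v1/')
#   # On success the credentials are cached in client._auth_configs and
#   # reused by later operations against that registry.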
|
||||
def ping(self):
|
||||
"""
|
||||
Checks that the server is responsive. An exception will be raised if it
|
||||
is not responding.
|
||||
|
||||
Returns:
|
||||
(bool): True if the server responds with ``OK``.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(self._get(self._url('/_ping'))) == 'OK'
|
||||
|
||||
def version(self, api_version=True):
|
||||
"""
|
||||
Returns version information from the server. Similar to the ``docker
|
||||
version`` command.
|
||||
|
||||
Returns:
|
||||
(dict): The server version information
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url("/version", versioned_api=api_version)
|
||||
return self._result(self._get(url), json=True)
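# Hedged usage sketch (illustrative only): the read-only calls above are
# commonly combined to probe a daemon before doing real work:
#
#   if client.ping():
#       info = client.info()      # e.g. info['ServerVersion']
#       ver = client.version()    # e.g. ver['ApiVersion']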
@ -0,0 +1,388 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
|
||||
from ansible.module_utils.six import iteritems, string_types
|
||||
|
||||
from . import errors
|
||||
from .credentials.store import Store
|
||||
from .credentials.errors import StoreError, CredentialsNotFound
|
||||
from .utils import config
|
||||
|
||||
INDEX_NAME = 'docker.io'
|
||||
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
|
||||
TOKEN_USERNAME = '<token>'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def resolve_repository_name(repo_name):
|
||||
if '://' in repo_name:
|
||||
raise errors.InvalidRepository(
|
||||
'Repository name cannot contain a scheme ({0})'.format(repo_name)
|
||||
)
|
||||
|
||||
index_name, remote_name = split_repo_name(repo_name)
|
||||
if index_name[0] == '-' or index_name[-1] == '-':
|
||||
raise errors.InvalidRepository(
|
||||
'Invalid index name ({0}). Cannot begin or end with a'
|
||||
' hyphen.'.format(index_name)
|
||||
)
|
||||
return resolve_index_name(index_name), remote_name
|
||||
|
||||
|
||||
def resolve_index_name(index_name):
|
||||
index_name = convert_to_hostname(index_name)
|
||||
if index_name == 'index.' + INDEX_NAME:
|
||||
index_name = INDEX_NAME
|
||||
return index_name
|
||||
|
||||
|
||||
def get_config_header(client, registry):
|
||||
log.debug('Looking for auth config')
|
||||
if not client._auth_configs or client._auth_configs.is_empty:
|
||||
log.debug(
|
||||
"No auth config in memory - loading from filesystem"
|
||||
)
|
||||
client._auth_configs = load_config(credstore_env=client.credstore_env)
|
||||
authcfg = resolve_authconfig(
|
||||
client._auth_configs, registry, credstore_env=client.credstore_env
|
||||
)
|
||||
# Do not fail here if no authentication exists for this
|
||||
# specific registry as we can have a readonly pull. Just
|
||||
# put the header if we can.
|
||||
if authcfg:
|
||||
log.debug('Found auth config')
|
||||
# auth_config needs to be a dict in the format used by
|
||||
# auth.py username , password, serveraddress, email
|
||||
return encode_header(authcfg)
|
||||
log.debug('No auth config found')
|
||||
return None
|
||||
|
||||
|
||||
def split_repo_name(repo_name):
|
||||
parts = repo_name.split('/', 1)
|
||||
if len(parts) == 1 or (
|
||||
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
|
||||
):
|
||||
# This is a docker index repo (ex: username/foobar or ubuntu)
|
||||
return INDEX_NAME, repo_name
|
||||
return tuple(parts)
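# Illustrative behaviour of the resolvers above (values not from the
# original source):
#
#   split_repo_name('ubuntu')              -> ('docker.io', 'ubuntu')
#   split_repo_name('user/app')            -> ('docker.io', 'user/app')
#   split_repo_name('reg.example.com:5000/app')
#                                          -> ('reg.example.com:5000', 'app')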
|
||||
|
||||
def get_credential_store(authconfig, registry):
|
||||
if not isinstance(authconfig, AuthConfig):
|
||||
authconfig = AuthConfig(authconfig)
|
||||
return authconfig.get_credential_store(registry)
|
||||
|
||||
|
||||
class AuthConfig(dict):
|
||||
def __init__(self, dct, credstore_env=None):
|
||||
if 'auths' not in dct:
|
||||
dct['auths'] = {}
|
||||
self.update(dct)
|
||||
self._credstore_env = credstore_env
|
||||
self._stores = {}
|
||||
|
||||
@classmethod
|
||||
def parse_auth(cls, entries, raise_on_error=False):
|
||||
"""
|
||||
Parses authentication entries
|
||||
|
||||
Args:
|
||||
entries: Dict of authentication entries.
|
||||
raise_on_error: If set to true, an invalid format will raise
|
||||
InvalidConfigFile
|
||||
|
||||
Returns:
|
||||
Authentication registry.
|
||||
"""
|
||||
|
||||
conf = {}
|
||||
for registry, entry in iteritems(entries):
|
||||
if not isinstance(entry, dict):
|
||||
log.debug('Config entry for key %s is not auth config', registry)
|
||||
# We sometimes fall back to parsing the whole config as if it
|
||||
# was the auth config by itself, for legacy purposes. In that
|
||||
# case, we fail silently and return an empty conf if any of the
|
||||
# keys is not formatted properly.
|
||||
if raise_on_error:
|
||||
raise errors.InvalidConfigFile(
|
||||
'Invalid configuration for registry {0}'.format(
|
||||
registry
|
||||
)
|
||||
)
|
||||
return {}
|
||||
if 'identitytoken' in entry:
|
||||
log.debug('Found an IdentityToken entry for registry %s', registry)
|
||||
conf[registry] = {
|
||||
'IdentityToken': entry['identitytoken']
|
||||
}
|
||||
continue # Other values are irrelevant if we have a token
|
||||
|
||||
if 'auth' not in entry:
|
||||
# Starting with engine v1.11 (API 1.23), an empty dictionary is
|
||||
# a valid value in the auths config.
|
||||
# https://github.com/docker/compose/issues/3265
|
||||
log.debug('Auth data for %s is absent. Client might be using a credentials store instead.', registry)
|
||||
conf[registry] = {}
|
||||
continue
|
||||
|
||||
username, password = decode_auth(entry['auth'])
|
||||
log.debug('Found entry (registry=%s, username=%s)', repr(registry), repr(username))
|
||||
|
||||
conf[registry] = {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': entry.get('email'),
|
||||
'serveraddress': registry,
|
||||
}
|
||||
return conf
|
||||
|
||||
@classmethod
|
||||
def load_config(cls, config_path, config_dict, credstore_env=None):
|
||||
"""
|
||||
Loads authentication data from a Docker configuration file in the given
|
||||
root directory or if config_path is passed use given path.
|
||||
Lookup priority:
|
||||
explicit config_path parameter > DOCKER_CONFIG environment
|
||||
variable > ~/.docker/config.json > ~/.dockercfg
|
||||
"""
|
||||
|
||||
if not config_dict:
|
||||
config_file = config.find_config_file(config_path)
|
||||
|
||||
if not config_file:
|
||||
return cls({}, credstore_env)
|
||||
try:
|
||||
with open(config_file) as f:
|
||||
config_dict = json.load(f)
|
||||
except (IOError, KeyError, ValueError) as e:
|
||||
# Likely missing new Docker config file or it is in an
|
||||
# unknown format, continue to attempt to read old location
|
||||
# and format.
|
||||
log.debug(e)
|
||||
return cls(_load_legacy_config(config_file), credstore_env)
|
||||
|
||||
res = {}
|
||||
if config_dict.get('auths'):
|
||||
log.debug("Found 'auths' section")
|
||||
res.update({
|
||||
'auths': cls.parse_auth(
|
||||
config_dict.pop('auths'), raise_on_error=True
|
||||
)
|
||||
})
|
||||
if config_dict.get('credsStore'):
|
||||
log.debug("Found 'credsStore' section")
|
||||
res.update({'credsStore': config_dict.pop('credsStore')})
|
||||
if config_dict.get('credHelpers'):
|
||||
log.debug("Found 'credHelpers' section")
|
||||
res.update({'credHelpers': config_dict.pop('credHelpers')})
|
||||
if res:
|
||||
return cls(res, credstore_env)
|
||||
|
||||
log.debug(
|
||||
"Could not find auth-related section ; attempting to interpret "
|
||||
"as auth-only file"
|
||||
)
|
||||
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
|
||||
|
||||
@property
|
||||
def auths(self):
|
||||
return self.get('auths', {})
|
||||
|
||||
@property
|
||||
def creds_store(self):
|
||||
return self.get('credsStore', None)
|
||||
|
||||
@property
|
||||
def cred_helpers(self):
|
||||
return self.get('credHelpers', {})
|
||||
|
||||
@property
|
||||
def is_empty(self):
|
||||
return (
|
||||
not self.auths and not self.creds_store and not self.cred_helpers
|
||||
)
|
||||
|
||||
def resolve_authconfig(self, registry=None):
|
||||
"""
|
||||
Returns the authentication data from the given auth configuration for a
|
||||
specific registry. As with the Docker client, legacy entries in the
|
||||
config with full URLs are stripped down to hostnames before checking
|
||||
for a match. Returns None if no match was found.
|
||||
"""
|
||||
|
||||
if self.creds_store or self.cred_helpers:
|
||||
store_name = self.get_credential_store(registry)
|
||||
if store_name is not None:
|
||||
log.debug('Using credentials store "%s"', store_name)
|
||||
cfg = self._resolve_authconfig_credstore(registry, store_name)
|
||||
if cfg is not None:
|
||||
return cfg
|
||||
log.debug('No entry in credstore - fetching from auth dict')
|
||||
|
||||
# Default to the public index server
|
||||
registry = resolve_index_name(registry) if registry else INDEX_NAME
|
||||
log.debug("Looking for auth entry for %s", repr(registry))
|
||||
|
||||
if registry in self.auths:
|
||||
log.debug("Found %s", repr(registry))
|
||||
return self.auths[registry]
|
||||
|
||||
for key, conf in iteritems(self.auths):
|
||||
if resolve_index_name(key) == registry:
|
||||
log.debug("Found %s", repr(key))
|
||||
return conf
|
||||
|
||||
log.debug("No entry found")
|
||||
return None
|
||||
|
||||
def _resolve_authconfig_credstore(self, registry, credstore_name):
|
||||
if not registry or registry == INDEX_NAME:
|
||||
# The ecosystem is inconsistent about index.docker.io vs.
# docker.io - in that case, the full URL appears to be necessary.
|
||||
registry = INDEX_URL
|
||||
log.debug("Looking for auth entry for %s", repr(registry))
|
||||
store = self._get_store_instance(credstore_name)
|
||||
try:
|
||||
data = store.get(registry)
|
||||
res = {
|
||||
'ServerAddress': registry,
|
||||
}
|
||||
if data['Username'] == TOKEN_USERNAME:
|
||||
res['IdentityToken'] = data['Secret']
|
||||
else:
|
||||
res.update({
|
||||
'Username': data['Username'],
|
||||
'Password': data['Secret'],
|
||||
})
|
||||
return res
|
||||
except CredentialsNotFound:
|
||||
log.debug('No entry found')
|
||||
return None
|
||||
except StoreError as e:
|
||||
raise errors.DockerException(
|
||||
'Credentials store error: {0}'.format(repr(e))
|
||||
)
|
||||
|
||||
def _get_store_instance(self, name):
|
||||
if name not in self._stores:
|
||||
self._stores[name] = Store(
|
||||
name, environment=self._credstore_env
|
||||
)
|
||||
return self._stores[name]
|
||||
|
||||
def get_credential_store(self, registry):
|
||||
if not registry or registry == INDEX_NAME:
|
||||
registry = INDEX_URL
|
||||
|
||||
return self.cred_helpers.get(registry) or self.creds_store
|
||||
|
||||
def get_all_credentials(self):
|
||||
auth_data = self.auths.copy()
|
||||
if self.creds_store:
|
||||
# Retrieve all credentials from the default store
|
||||
store = self._get_store_instance(self.creds_store)
|
||||
for k in store.list().keys():
|
||||
auth_data[k] = self._resolve_authconfig_credstore(
|
||||
k, self.creds_store
|
||||
)
|
||||
auth_data[convert_to_hostname(k)] = auth_data[k]
|
||||
|
||||
# credHelpers entries take priority over all others
|
||||
for reg, store_name in self.cred_helpers.items():
|
||||
auth_data[reg] = self._resolve_authconfig_credstore(
|
||||
reg, store_name
|
||||
)
|
||||
auth_data[convert_to_hostname(reg)] = auth_data[reg]
|
||||
|
||||
return auth_data
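# Shape sketch of the merged result (illustrative): 'auths' entries are
# copied first, then credsStore lookups, then credHelpers entries, with
# later sources overwriting earlier ones; each registry ends up keyed
# both by its raw value and by its bare hostname:
#
#   {'https://reg.example.com': {...}, 'reg.example.com': {...}}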
|
||||
def add_auth(self, reg, data):
|
||||
self['auths'][reg] = data
|
||||
|
||||
|
||||
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
|
||||
if not isinstance(authconfig, AuthConfig):
|
||||
authconfig = AuthConfig(authconfig, credstore_env)
|
||||
return authconfig.resolve_authconfig(registry)
|
||||
|
||||
|
||||
def convert_to_hostname(url):
|
||||
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
|
||||
|
||||
|
||||
def decode_auth(auth):
|
||||
if isinstance(auth, string_types):
|
||||
auth = auth.encode('ascii')
|
||||
s = base64.b64decode(auth)
|
||||
login, pwd = s.split(b':', 1)
|
||||
return login.decode('utf8'), pwd.decode('utf8')
|
||||
|
||||
|
||||
def encode_header(auth):
|
||||
auth_json = json.dumps(auth).encode('ascii')
|
||||
return base64.urlsafe_b64encode(auth_json)
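# Round-trip sketch for the two helpers above (illustrative values):
#
#   decode_auth('dXNlcjpzM2NyZXQ=')      -> ('user', 's3cret')
#   encode_header({'username': 'user'})  -> URL-safe base64 of the JSON
#                                           body, as sent in the
#                                           X-Registry-Config header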
|
||||
|
||||
def parse_auth(entries, raise_on_error=False):
|
||||
"""
|
||||
Parses authentication entries
|
||||
|
||||
Args:
|
||||
entries: Dict of authentication entries.
|
||||
raise_on_error: If set to true, an invalid format will raise
|
||||
InvalidConfigFile
|
||||
|
||||
Returns:
|
||||
Authentication registry.
|
||||
"""
|
||||
|
||||
return AuthConfig.parse_auth(entries, raise_on_error)
|
||||
|
||||
|
||||
def load_config(config_path=None, config_dict=None, credstore_env=None):
|
||||
return AuthConfig.load_config(config_path, config_dict, credstore_env)
|
||||
|
||||
|
||||
def _load_legacy_config(config_file):
|
||||
log.debug("Attempting to parse legacy auth file format")
|
||||
try:
|
||||
data = []
|
||||
with open(config_file) as f:
|
||||
for line in f.readlines():
|
||||
data.append(line.strip().split(' = ')[1])
|
||||
if len(data) < 2:
|
||||
# Not enough data
|
||||
raise errors.InvalidConfigFile(
|
||||
'Invalid or empty configuration file!'
|
||||
)
|
||||
|
||||
username, password = decode_auth(data[0])
|
||||
return {'auths': {
|
||||
INDEX_NAME: {
|
||||
'username': username,
|
||||
'password': password,
|
||||
'email': data[1],
|
||||
'serveraddress': INDEX_URL,
|
||||
}
|
||||
}}
|
||||
except Exception as e:
|
||||
log.debug(e)
|
||||
|
||||
log.debug("All parsing attempts failed - returning empty config")
|
||||
return {}
|
@ -0,0 +1,46 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import sys
|
||||
|
||||
MINIMUM_DOCKER_API_VERSION = '1.21'
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
CONTAINER_LIMITS_KEYS = [
|
||||
'memory', 'memswap', 'cpushares', 'cpusetcpus'
|
||||
]
|
||||
|
||||
DEFAULT_HTTP_HOST = "127.0.0.1"
|
||||
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
|
||||
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
|
||||
|
||||
BYTE_UNITS = {
|
||||
'b': 1,
|
||||
'k': 1024,
|
||||
'm': 1024 * 1024,
|
||||
'g': 1024 * 1024 * 1024
|
||||
}
|
||||
|
||||
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
|
||||
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
|
||||
|
||||
DEFAULT_USER_AGENT = "ansible-community.docker"
|
||||
DEFAULT_NUM_POOLS = 25
|
||||
|
||||
# The OpenSSH server default value for MaxSessions is 10 which means we can
|
||||
# use up to 9, leaving the final session for the underlying SSH connection.
|
||||
# For more details see: https://github.com/docker/docker-py/issues/2246
|
||||
DEFAULT_NUM_POOLS_SSH = 9
|
||||
|
||||
DEFAULT_MAX_POOL_SIZE = 10
|
||||
|
||||
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
|
@ -0,0 +1,242 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from ansible.module_utils.six import raise_from
|
||||
|
||||
from .. import errors
|
||||
|
||||
from .config import (
|
||||
METAFILE,
|
||||
get_current_context_name,
|
||||
get_meta_dir,
|
||||
write_context_name_to_docker_config,
|
||||
)
|
||||
from .context import Context
|
||||
|
||||
|
||||
def create_default_context():
|
||||
host = None
|
||||
if os.environ.get('DOCKER_HOST'):
|
||||
host = os.environ.get('DOCKER_HOST')
|
||||
return Context("default", "swarm", host, description="Current DOCKER_HOST based configuration")
|
||||
|
||||
|
||||
class ContextAPI(object):
|
||||
"""Context API.
|
||||
Contains methods for context management:
|
||||
create, list, remove, get, inspect.
|
||||
"""
|
||||
DEFAULT_CONTEXT = None
|
||||
|
||||
@classmethod
|
||||
def get_default_context(cls):
|
||||
context = cls.DEFAULT_CONTEXT
|
||||
if context is None:
|
||||
context = create_default_context()
|
||||
cls.DEFAULT_CONTEXT = context
|
||||
return context
|
||||
|
||||
@classmethod
|
||||
def create_context(
|
||||
cls, name, orchestrator=None, host=None, tls_cfg=None,
|
||||
default_namespace=None, skip_tls_verify=False):
|
||||
"""Creates a new context.
|
||||
Returns:
|
||||
(Context): a Context object.
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextAlreadyExists`
|
||||
If a context with the name already exists.
|
||||
:py:class:`docker.errors.ContextException`
|
||||
If name is default.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ctx = ContextAPI.create_context(name='test')
|
||||
>>> print(ctx.Metadata)
|
||||
{
|
||||
"Name": "test",
|
||||
"Metadata": {},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "unix:///var/run/docker.sock",
|
||||
"SkipTLSVerify": false
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
raise errors.ContextException(
|
||||
'"default" is a reserved context name')
|
||||
ctx = Context.load_context(name)
|
||||
if ctx:
|
||||
raise errors.ContextAlreadyExists(name)
|
||||
endpoint = "docker"
|
||||
if orchestrator and orchestrator != "swarm":
|
||||
endpoint = orchestrator
|
||||
ctx = Context(name, orchestrator)
|
||||
ctx.set_endpoint(
|
||||
endpoint, host, tls_cfg,
|
||||
skip_tls_verify=skip_tls_verify,
|
||||
def_namespace=default_namespace)
|
||||
ctx.save()
|
||||
return ctx
|
||||
|
||||
@classmethod
|
||||
def get_context(cls, name=None):
|
||||
"""Retrieves a context object.
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ctx = ContextAPI.get_context(name='test')
|
||||
>>> print(ctx.Metadata)
|
||||
{
|
||||
"Name": "test",
|
||||
"Metadata": {},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "unix:///var/run/docker.sock",
|
||||
"SkipTLSVerify": false
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
if not name:
|
||||
name = get_current_context_name()
|
||||
if name == "default":
|
||||
return cls.get_default_context()
|
||||
return Context.load_context(name)
|
||||
|
||||
@classmethod
|
||||
def contexts(cls):
|
||||
"""Context list.
|
||||
Returns:
|
||||
(list of Context): List of context objects.
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If something goes wrong.
|
||||
"""
|
||||
names = []
|
||||
for dirname, dummy, fnames in os.walk(get_meta_dir()):
|
||||
for filename in fnames:
|
||||
if filename == METAFILE:
|
||||
filepath = os.path.join(dirname, filename)
|
||||
try:
|
||||
with open(filepath, "r") as f:
|
||||
data = json.load(f)
|
||||
name = data["Name"]
|
||||
if name == "default":
|
||||
raise ValueError('"default" is a reserved context name')
|
||||
names.append(name)
|
||||
except Exception as e:
|
||||
raise_from(errors.ContextException(
|
||||
"Failed to load metafile {filepath}: {e}".format(filepath=filepath, e=e),
|
||||
), e)
|
||||
|
||||
contexts = [cls.get_default_context()]
|
||||
for name in names:
|
||||
context = Context.load_context(name)
|
||||
if not context:
|
||||
raise errors.ContextException("Context {context} cannot be found".format(context=name))
|
||||
contexts.append(context)
|
||||
return contexts
|
||||
|
||||
@classmethod
|
||||
def get_current_context(cls):
|
||||
"""Get current context.
|
||||
Returns:
|
||||
(Context): current context object.
|
||||
"""
|
||||
return cls.get_context()
|
||||
|
||||
@classmethod
|
||||
def set_current_context(cls, name="default"):
|
||||
ctx = cls.get_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
|
||||
err = write_context_name_to_docker_config(name)
|
||||
if err:
|
||||
raise errors.ContextException(
|
||||
'Failed to set current context: {err}'.format(err=err))
|
||||
|
||||
@classmethod
|
||||
def remove_context(cls, name):
|
||||
"""Remove a context. Similar to the ``docker context rm`` command.
|
||||
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextNotFound`
|
||||
If a context with the name does not exist.
|
||||
:py:class:`docker.errors.ContextException`
|
||||
If name is default.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ContextAPI.remove_context(name='test')
|
||||
>>>
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
raise errors.ContextException(
|
||||
'context "default" cannot be removed')
|
||||
ctx = Context.load_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
if name == get_current_context_name():
|
||||
write_context_name_to_docker_config(None)
|
||||
ctx.remove()
|
||||
|
||||
@classmethod
|
||||
def inspect_context(cls, name="default"):
|
||||
"""Inspect a context. Similar to the ``docker context inspect`` command.
|
||||
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextNotFound`
|
||||
If a context with the name does not exist.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ContextAPI.inspect_context(name='test')
|
||||
>>>
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
return cls.get_default_context()()
|
||||
ctx = Context.load_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
|
||||
return ctx()
|
@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
|
||||
from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
|
||||
from ..utils.config import find_config_file, get_default_config_file
|
||||
from ..utils.utils import parse_host
|
||||
|
||||
METAFILE = "meta.json"
|
||||
|
||||
|
||||
def get_current_context_name_with_source():
|
||||
if os.environ.get('DOCKER_HOST'):
|
||||
return "default", "DOCKER_HOST environment variable set"
|
||||
if os.environ.get('DOCKER_CONTEXT'):
|
||||
return os.environ['DOCKER_CONTEXT'], "DOCKER_CONTEXT environment variable set"
|
||||
docker_cfg_path = find_config_file()
|
||||
if docker_cfg_path:
|
||||
try:
|
||||
with open(docker_cfg_path) as f:
|
||||
return json.load(f).get("currentContext", "default"), "configuration file {file}".format(file=docker_cfg_path)
|
||||
except Exception:
|
||||
pass
|
||||
return "default", "fallback value"
|
||||
|
||||
def get_current_context_name():
|
||||
return get_current_context_name_with_source()[0]
|
||||
|
||||
|
||||
def write_context_name_to_docker_config(name=None):
|
||||
if name == 'default':
|
||||
name = None
|
||||
docker_cfg_path = find_config_file()
|
||||
config = {}
|
||||
if docker_cfg_path:
|
||||
try:
|
||||
with open(docker_cfg_path) as f:
|
||||
config = json.load(f)
|
||||
except Exception as e:
|
||||
return e
|
||||
current_context = config.get("currentContext", None)
|
||||
if current_context and not name:
|
||||
del config["currentContext"]
|
||||
elif name:
|
||||
config["currentContext"] = name
|
||||
else:
|
||||
return
|
||||
if not docker_cfg_path:
|
||||
docker_cfg_path = get_default_config_file()
|
||||
try:
|
||||
with open(docker_cfg_path, "w") as f:
|
||||
json.dump(config, f, indent=4)
|
||||
except Exception as e:
|
||||
return e
|
||||
|
||||
|
||||
def get_context_id(name):
|
||||
return hashlib.sha256(name.encode('utf-8')).hexdigest()
|
||||
|
||||
|
||||
def get_context_dir():
|
||||
docker_cfg_path = find_config_file() or get_default_config_file()
|
||||
return os.path.join(os.path.dirname(docker_cfg_path), "contexts")
|
||||
|
||||
|
||||
def get_meta_dir(name=None):
|
||||
meta_dir = os.path.join(get_context_dir(), "meta")
|
||||
if name:
|
||||
return os.path.join(meta_dir, get_context_id(name))
|
||||
return meta_dir
|
||||
|
||||
|
||||
def get_meta_file(name):
|
||||
return os.path.join(get_meta_dir(name), METAFILE)
|
||||
|
||||
|
||||
def get_tls_dir(name=None, endpoint=""):
|
||||
context_dir = get_context_dir()
|
||||
if name:
|
||||
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
|
||||
return os.path.join(context_dir, "tls")
|
||||
|
||||
|
||||
def get_context_host(path=None, tls=False):
|
||||
host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
|
||||
if host == DEFAULT_UNIX_SOCKET:
|
||||
# remove http+ from default docker socket url
|
||||
if host.startswith("http+"):
|
||||
host = host[5:]
|
||||
return host
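# Illustrative: with no path and tls=False on Linux this resolves to
#   'unix:///var/run/docker.sock'   (the 'http+' prefix stripped above)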
@ -0,0 +1,268 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
from shutil import copyfile, rmtree
|
||||
|
||||
from ansible.module_utils.six import raise_from
|
||||
|
||||
from ..errors import ContextException
|
||||
from ..tls import TLSConfig
|
||||
|
||||
from .config import (
|
||||
get_context_host,
|
||||
get_meta_dir,
|
||||
get_meta_file,
|
||||
get_tls_dir,
|
||||
)
|
||||
|
||||
|
||||
IN_MEMORY = "IN MEMORY"
|
||||
|
||||
|
||||
class Context(object):
|
||||
"""A context."""
|
||||
|
||||
def __init__(self, name, orchestrator=None, host=None, endpoints=None,
|
||||
skip_tls_verify=False, tls=False, description=None):
|
||||
if not name:
|
||||
raise Exception("Name not provided")
|
||||
self.name = name
|
||||
self.context_type = None
|
||||
self.orchestrator = orchestrator
|
||||
self.endpoints = {}
|
||||
self.tls_cfg = {}
|
||||
self.meta_path = IN_MEMORY
|
||||
self.tls_path = IN_MEMORY
|
||||
self.description = description
|
||||
|
||||
if not endpoints:
|
||||
# set default docker endpoint if no endpoint is set
|
||||
default_endpoint = "docker" if (
|
||||
not orchestrator or orchestrator == "swarm"
|
||||
) else orchestrator
|
||||
|
||||
self.endpoints = {
|
||||
default_endpoint: {
|
||||
"Host": get_context_host(host, skip_tls_verify or tls),
|
||||
"SkipTLSVerify": skip_tls_verify,
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
# check docker endpoints
|
||||
for k, v in endpoints.items():
|
||||
if not isinstance(v, dict):
|
||||
# unknown format
|
||||
raise ContextException(
|
||||
"Unknown endpoint format for context {name}: {v}".format(name=name, v=v),
|
||||
)
|
||||
|
||||
self.endpoints[k] = v
|
||||
if k != "docker":
|
||||
continue
|
||||
|
||||
self.endpoints[k]["Host"] = v.get("Host", get_context_host(
|
||||
host, skip_tls_verify or tls))
|
||||
self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
|
||||
"SkipTLSVerify", skip_tls_verify))
|
||||
|
||||
def set_endpoint(
|
||||
self, name="docker", host=None, tls_cfg=None,
|
||||
skip_tls_verify=False, def_namespace=None):
|
||||
self.endpoints[name] = {
|
||||
"Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
|
||||
"SkipTLSVerify": skip_tls_verify
|
||||
}
|
||||
if def_namespace:
|
||||
self.endpoints[name]["DefaultNamespace"] = def_namespace
|
||||
|
||||
if tls_cfg:
|
||||
self.tls_cfg[name] = tls_cfg
|
||||
|
||||
def inspect(self):
|
||||
return self.__call__()
|
||||
|
||||
@classmethod
|
||||
def load_context(cls, name):
|
||||
meta = Context._load_meta(name)
|
||||
if meta:
|
||||
instance = cls(
|
||||
meta["Name"],
|
||||
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
|
||||
endpoints=meta.get("Endpoints", None),
|
||||
description=meta["Metadata"].get('Description'))
|
||||
instance.context_type = meta["Metadata"].get("Type", None)
|
||||
instance._load_certs()
|
||||
instance.meta_path = get_meta_dir(name)
|
||||
return instance
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def _load_meta(cls, name):
|
||||
meta_file = get_meta_file(name)
|
||||
if not os.path.isfile(meta_file):
|
||||
return None
|
||||
|
||||
metadata = {}
|
||||
try:
|
||||
with open(meta_file) as f:
|
||||
metadata = json.load(f)
|
||||
except (OSError, KeyError, ValueError) as e:
|
||||
# unknown format
|
||||
raise_from(Exception(
|
||||
"Detected corrupted meta file for context {name} : {e}".format(name=name, e=e)
|
||||
), e)
|
||||
|
||||
# for docker endpoints, set defaults for
|
||||
# Host and SkipTLSVerify fields
|
||||
for k, v in metadata["Endpoints"].items():
|
||||
if k != "docker":
|
||||
continue
|
||||
metadata["Endpoints"][k]["Host"] = v.get(
|
||||
"Host", get_context_host(None, False))
|
||||
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
|
||||
v.get("SkipTLSVerify", True))
|
||||
|
||||
return metadata
|
||||
|
||||
def _load_certs(self):
|
||||
certs = {}
|
||||
tls_dir = get_tls_dir(self.name)
|
||||
for endpoint in self.endpoints.keys():
|
||||
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
|
||||
continue
|
||||
ca_cert = None
|
||||
cert = None
|
||||
key = None
|
||||
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
|
||||
if filename.startswith("ca"):
|
||||
ca_cert = os.path.join(tls_dir, endpoint, filename)
|
||||
elif filename.startswith("cert"):
|
||||
cert = os.path.join(tls_dir, endpoint, filename)
|
||||
elif filename.startswith("key"):
|
||||
key = os.path.join(tls_dir, endpoint, filename)
|
||||
if all([cert, key]) or ca_cert:
|
||||
verify = None
|
||||
if endpoint == "docker" and not self.endpoints["docker"].get(
|
||||
"SkipTLSVerify", False):
|
||||
verify = True
|
||||
certs[endpoint] = TLSConfig(
|
||||
client_cert=(cert, key) if cert and key else None, ca_cert=ca_cert, verify=verify)
|
||||
self.tls_cfg = certs
|
||||
self.tls_path = tls_dir
|
||||
|
||||
def save(self):
|
||||
meta_dir = get_meta_dir(self.name)
|
||||
if not os.path.isdir(meta_dir):
|
||||
os.makedirs(meta_dir)
|
||||
with open(get_meta_file(self.name), "w") as f:
|
||||
f.write(json.dumps(self.Metadata))
|
||||
|
||||
tls_dir = get_tls_dir(self.name)
|
||||
for endpoint, tls in self.tls_cfg.items():
|
||||
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
|
||||
os.makedirs(os.path.join(tls_dir, endpoint))
|
||||
|
||||
ca_file = tls.ca_cert
|
||||
if ca_file:
|
||||
copyfile(ca_file, os.path.join(
|
||||
tls_dir, endpoint, os.path.basename(ca_file)))
|
||||
|
||||
if tls.cert:
|
||||
cert_file, key_file = tls.cert
|
||||
copyfile(cert_file, os.path.join(
|
||||
tls_dir, endpoint, os.path.basename(cert_file)))
|
||||
copyfile(key_file, os.path.join(
|
||||
tls_dir, endpoint, os.path.basename(key_file)))
|
||||
|
||||
self.meta_path = get_meta_dir(self.name)
|
||||
self.tls_path = get_tls_dir(self.name)
|
||||
|
||||
def remove(self):
|
||||
if os.path.isdir(self.meta_path):
|
||||
rmtree(self.meta_path)
|
||||
if os.path.isdir(self.tls_path):
|
||||
rmtree(self.tls_path)
|
||||
|
||||
def __repr__(self):
|
||||
return "<{classname}: '{name}'>".format(classname=self.__class__.__name__, name=self.name)
|
||||
|
||||
def __str__(self):
|
||||
return json.dumps(self.__call__(), indent=2)
|
||||
|
||||
def __call__(self):
|
||||
result = self.Metadata
|
||||
result.update(self.TLSMaterial)
|
||||
result.update(self.Storage)
|
||||
return result
|
||||
|
||||
def is_docker_host(self):
|
||||
return self.context_type is None
|
||||
|
||||
@property
|
||||
def Name(self):
|
||||
return self.name
|
||||
|
||||
@property
|
||||
def Host(self):
|
||||
if not self.orchestrator or self.orchestrator == "swarm":
|
||||
endpoint = self.endpoints.get("docker", None)
|
||||
if endpoint:
|
||||
return endpoint.get("Host", None)
|
||||
return None
|
||||
|
||||
return self.endpoints[self.orchestrator].get("Host", None)
|
||||
|
||||
@property
|
||||
def Orchestrator(self):
|
||||
return self.orchestrator
|
||||
|
||||
@property
|
||||
def Metadata(self):
|
||||
meta = {}
|
||||
if self.orchestrator:
|
||||
meta = {"StackOrchestrator": self.orchestrator}
|
||||
return {
|
||||
"Name": self.name,
|
||||
"Metadata": meta,
|
||||
"Endpoints": self.endpoints
|
||||
}
|
||||
|
||||
@property
|
||||
def TLSConfig(self):
|
||||
key = self.orchestrator
|
||||
if not key or key == "swarm":
|
||||
key = "docker"
|
||||
if key in self.tls_cfg.keys():
|
||||
return self.tls_cfg[key]
|
||||
return None
|
||||
|
||||
@property
|
||||
def TLSMaterial(self):
|
||||
certs = {}
|
||||
for endpoint, tls in self.tls_cfg.items():
|
||||
cert, key = tls.cert
|
||||
certs[endpoint] = list(
|
||||
map(os.path.basename, [tls.ca_cert, cert, key]))
|
||||
return {
|
||||
"TLSMaterial": certs
|
||||
}
|
||||
|
||||
@property
|
||||
def Storage(self):
|
||||
return {
|
||||
"Storage": {
|
||||
"MetadataPath": self.meta_path,
|
||||
"TLSPath": self.tls_path
|
||||
}}
|
@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
PROGRAM_PREFIX = 'docker-credential-'
|
||||
DEFAULT_LINUX_STORE = 'secretservice'
|
||||
DEFAULT_OSX_STORE = 'osxkeychain'
|
||||
DEFAULT_WIN32_STORE = 'wincred'
|
@ -0,0 +1,38 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
class StoreError(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
class CredentialsNotFound(StoreError):
|
||||
pass
|
||||
|
||||
|
||||
class InitializationError(StoreError):
|
||||
pass
|
||||
|
||||
|
||||
def process_store_error(cpe, program):
|
||||
message = cpe.output.decode('utf-8')
|
||||
if 'credentials not found in native keychain' in message:
|
||||
return CredentialsNotFound(
|
||||
'No matching credentials in {0}'.format(
|
||||
program
|
||||
)
|
||||
)
|
||||
return StoreError(
|
||||
'Credentials store {0} exited with "{1}".'.format(
|
||||
program, cpe.output.decode('utf-8').strip()
|
||||
)
|
||||
)
|
@ -0,0 +1,119 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import errno
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from ansible.module_utils.six import PY3, binary_type
|
||||
|
||||
from . import constants
|
||||
from . import errors
|
||||
from .utils import create_environment_dict
|
||||
from .utils import find_executable
|
||||
|
||||
|
||||
class Store(object):
|
||||
def __init__(self, program, environment=None):
|
||||
""" Create a store object that acts as an interface to
|
||||
perform the basic operations for storing, retrieving
|
||||
and erasing credentials using `program`.
|
||||
"""
|
||||
self.program = constants.PROGRAM_PREFIX + program
|
||||
self.exe = find_executable(self.program)
|
||||
self.environment = environment
|
||||
if self.exe is None:
|
||||
raise errors.InitializationError(
|
||||
'{0} not installed or not available in PATH'.format(
|
||||
self.program
|
||||
)
|
||||
)
|
||||
|
||||
def get(self, server):
|
||||
""" Retrieve credentials for `server`. If no credentials are found,
|
||||
a `StoreError` will be raised.
|
||||
"""
|
||||
if not isinstance(server, binary_type):
|
||||
server = server.encode('utf-8')
|
||||
data = self._execute('get', server)
|
||||
result = json.loads(data.decode('utf-8'))
|
||||
|
||||
# docker-credential-pass will return an object for nonexistent servers
|
||||
# whereas other helpers will exit with returncode != 0. For
|
||||
# consistency, if no significant data is returned,
|
||||
# raise CredentialsNotFound
|
||||
if result['Username'] == '' and result['Secret'] == '':
|
||||
raise errors.CredentialsNotFound(
|
||||
'No matching credentials in {0}'.format(self.program)
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def store(self, server, username, secret):
|
||||
""" Store credentials for `server`. Raises a `StoreError` if an error
|
||||
occurs.
|
||||
"""
|
||||
data_input = json.dumps({
|
||||
'ServerURL': server,
|
||||
'Username': username,
|
||||
'Secret': secret
|
||||
}).encode('utf-8')
|
||||
return self._execute('store', data_input)
|
||||
|
||||
def erase(self, server):
|
||||
""" Erase credentials for `server`. Raises a `StoreError` if an error
|
||||
occurs.
|
||||
"""
|
||||
if not isinstance(server, binary_type):
|
||||
server = server.encode('utf-8')
|
||||
self._execute('erase', server)
|
||||
|
||||
def list(self):
|
||||
""" List stored credentials. Requires v0.4.0+ of the helper.
|
||||
"""
|
||||
data = self._execute('list', None)
|
||||
return json.loads(data.decode('utf-8'))
|
||||
|
||||
def _execute(self, subcmd, data_input):
|
||||
output = None
|
||||
env = create_environment_dict(self.environment)
|
||||
try:
|
||||
if PY3:
|
||||
output = subprocess.check_output(
|
||||
[self.exe, subcmd], input=data_input, env=env,
|
||||
)
|
||||
else:
|
||||
process = subprocess.Popen(
|
||||
[self.exe, subcmd], stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, env=env,
|
||||
)
|
||||
output, dummy = process.communicate(data_input)
|
||||
if process.returncode != 0:
|
||||
raise subprocess.CalledProcessError(
|
||||
returncode=process.returncode, cmd='', output=output
|
||||
)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise errors.process_store_error(e, self.program)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
raise errors.StoreError(
|
||||
'{0} not installed or not available in PATH'.format(
|
||||
self.program
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise errors.StoreError(
|
||||
'Unexpected OS error "{0}", errno={1}'.format(
|
||||
e.strerror, e.errno
|
||||
)
|
||||
)
|
||||
return output
|
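# --- Editor's usage sketch (not part of the vendored file) ---
# A minimal sketch of driving the Store class, assuming a credential helper
# such as docker-credential-pass is installed and on PATH; 'pass' is a
# placeholder for whichever helper you actually use.

store = Store('pass')  # constants.PROGRAM_PREFIX makes this 'docker-credential-pass'
store.store(server='https://index.docker.io/v1/', username='alice', secret='s3cret')
creds = store.get('https://index.docker.io/v1/')  # {'ServerURL': ..., 'Username': ..., 'Secret': ...}
print(creds['Username'])
store.erase('https://index.docker.io/v1/')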
@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import sys

from ansible.module_utils.six import PY2

if PY2:
    from distutils.spawn import find_executable as which
else:
    from shutil import which


def find_executable(executable, path=None):
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    if not PY2:
        # shutil.which() already uses PATHEXT on Windows, so on
        # Python 3 we can simply use shutil.which() in all cases.
        # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
        return which(executable, path=path)

    if sys.platform != 'win32':
        return which(executable, path)

    if path is None:
        path = os.environ['PATH']

    paths = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
    base, ext = os.path.splitext(executable)

    if not os.path.isfile(executable):
        for p in paths:
            for ext in extensions:
                f = os.path.join(p, base + ext)
                if os.path.isfile(f):
                    return f
        return None
    else:
        return executable


def create_environment_dict(overrides):
    """
    Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
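# --- Editor's usage sketch (not part of the vendored file) ---
# The two helpers above are small but load-bearing: find_executable() is how
# the Store locates its credential helper, and create_environment_dict() is
# how per-call environment overrides are applied.

print(find_executable('docker-credential-pass'))  # absolute path, or None
env = create_environment_dict({'LANG': 'C'})      # copy of os.environ plus the override
assert env['LANG'] == 'C'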
@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ._import_helper import HTTPError as _HTTPError

from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import raise_from


class DockerException(Exception):
    """
    A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """


def create_api_error_from_http_exception(e):
    """
    Create a suitable APIError from requests.exceptions.HTTPError.
    """
    response = e.response
    try:
        explanation = response.json()['message']
    except ValueError:
        explanation = to_native((response.content or '').strip())
    cls = APIError
    if response.status_code == 404:
        if explanation and ('No such image' in str(explanation) or
                            'not found: does not exist or no pull access'
                            in str(explanation) or
                            'repository does not exist' in str(explanation)):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise_from(cls(e, response=response, explanation=explanation), e)


class APIError(_HTTPError, DockerException):
    """
    An HTTP error from the API.
    """
    def __init__(self, message, response=None, explanation=None):
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 does not
        super(APIError, self).__init__(message)
        self.response = response
        self.explanation = explanation

    def __str__(self):
        message = super(APIError, self).__str__()

        if self.is_client_error():
            message = '{0} Client Error for {1}: {2}'.format(
                self.response.status_code, self.response.url,
                self.response.reason)

        elif self.is_server_error():
            message = '{0} Server Error for {1}: {2}'.format(
                self.response.status_code, self.response.url,
                self.response.reason)

        if self.explanation:
            message = '{0} ("{1}")'.format(message, self.explanation)

        return message

    @property
    def status_code(self):
        if self.response is not None:
            return self.response.status_code

    def is_error(self):
        return self.is_client_error() or self.is_server_error()

    def is_client_error(self):
        if self.status_code is None:
            return False
        return 400 <= self.status_code < 500

    def is_server_error(self):
        if self.status_code is None:
            return False
        return 500 <= self.status_code < 600


class NotFound(APIError):
    pass


class ImageNotFound(NotFound):
    pass


class InvalidVersion(DockerException):
    pass


class InvalidRepository(DockerException):
    pass


class InvalidConfigFile(DockerException):
    pass


class InvalidArgument(DockerException):
    pass


class DeprecatedMethod(DockerException):
    pass


class TLSParameterError(DockerException):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg + (". TLS configurations should map the Docker CLI "
                           "client configurations. See "
                           "https://docs.docker.com/engine/articles/https/ "
                           "for API details.")


class NullResource(DockerException, ValueError):
    pass


class ContainerError(DockerException):
    """
    Represents a container that has exited with a non-zero exit code.
    """
    def __init__(self, container, exit_status, command, image, stderr):
        self.container = container
        self.exit_status = exit_status
        self.command = command
        self.image = image
        self.stderr = stderr

        err = ": {0}".format(stderr) if stderr is not None else ""
        msg = ("Command '{0}' in image '{1}' returned non-zero exit "
               "status {2}{3}").format(command, image, exit_status, err)

        super(ContainerError, self).__init__(msg)


class StreamParseError(RuntimeError):
    def __init__(self, reason):
        self.msg = reason


class BuildError(DockerException):
    def __init__(self, reason, build_log):
        super(BuildError, self).__init__(reason)
        self.msg = reason
        self.build_log = build_log


class ImageLoadError(DockerException):
    pass


def create_unexpected_kwargs_error(name, kwargs):
    quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)]
    text = ["{0}() ".format(name)]
    if len(quoted_kwargs) == 1:
        text.append("got an unexpected keyword argument ")
    else:
        text.append("got unexpected keyword arguments ")
    text.append(', '.join(quoted_kwargs))
    return TypeError(''.join(text))


class MissingContextParameter(DockerException):
    def __init__(self, param):
        self.param = param

    def __str__(self):
        return ("missing parameter: {0}".format(self.param))


class ContextAlreadyExists(DockerException):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return ("context {0} already exists".format(self.name))


class ContextException(DockerException):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return (self.msg)


class ContextNotFound(DockerException):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return ("context '{0}' not found".format(self.name))


class MissingRequirementException(DockerException):
    def __init__(self, msg, requirement, import_exception):
        self.msg = msg
        self.requirement = requirement
        self.import_exception = import_exception

    def __str__(self):
        return (self.msg)
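# --- Editor's error-handling sketch (not part of the vendored file) ---
# How calling code can lean on the hierarchy above; pull_image() is a
# hypothetical stand-in for any call that raises these exceptions.

def pull_image(ref):
    # hypothetical stand-in: anything that raises the SDK exceptions above
    raise ImageNotFound('No such image: {0}'.format(ref))

try:
    pull_image('example/app:latest')
except ImageNotFound:
    print('image does not exist or no pull access')
except APIError as exc:
    if exc.is_server_error():
        print('daemon-side failure: {0}'.format(exc.explanation))
except DockerException as exc:
    print('any other SDK-level failure: {0}'.format(exc))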
@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import ssl
import sys

from . import errors
from .transport.ssladapter import SSLHTTPAdapter


class TLSConfig(object):
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be ``False`` or a path to a CA cert
            file.
        ssl_version (int): A valid `SSL version`_.
        assert_hostname (bool): Verify the hostname of the server.

    .. _`SSL version`:
        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
    """
    cert = None
    ca_cert = None
    verify = None
    ssl_version = None

    def __init__(self, client_cert=None, ca_cert=None, verify=None,
                 ssl_version=None, assert_hostname=None):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving verify=False

        self.assert_hostname = assert_hostname

        # If the user provides an SSL version, we should use their preference
        if ssl_version:
            self.ssl_version = ssl_version
        elif (sys.version_info.major, sys.version_info.minor) < (3, 6):
            # If the user provides no ssl version, we should default to
            # TLSv1_2. This option is the most secure, and will work for the
            # majority of users with reasonably up-to-date software. However,
            # before doing so, detect openssl version to ensure we can support
            # it.
            if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
                    ssl, 'PROTOCOL_TLSv1_2'):
                # If the OpenSSL version is high enough to support TLSv1_2,
                # then we should use it.
                self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
            else:
                # Otherwise, TLS v1.0 seems to be the safest default;
                # SSLv23 fails in mysterious ways:
                # https://github.com/docker/docker-py/issues/963
                self.ssl_version = ssl.PROTOCOL_TLSv1
        else:
            self.ssl_version = ssl.PROTOCOL_TLS_CLIENT

        # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected, but either is
        # missing.

        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    'client_cert must be a tuple of'
                    ' (client certificate, key file)'
                )

            if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
                                              not os.path.isfile(tls_key)):
                raise errors.TLSParameterError(
                    'Path to a certificate and key files must be provided'
                    ' through the client_cert param'
                )
            self.cert = (tls_cert, tls_key)

        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                'Invalid CA certificate provided for `ca_cert`.'
            )

    def configure_client(self, client):
        """
        Configure a client with these TLS options.
        """
        client.ssl_version = self.ssl_version

        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert

        client.mount('https://', SSLHTTPAdapter(
            ssl_version=self.ssl_version,
            assert_hostname=self.assert_hostname,
        ))
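# --- Editor's usage sketch (not part of the vendored file) ---
# Wiring TLSConfig into a requests.Session, which is the shape of `client`
# that configure_client() expects. The certificate paths are placeholders
# and must exist, or TLSConfig raises TLSParameterError.

import requests

tls = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,
)
session = requests.Session()
tls.configure_client(session)  # sets verify/cert and mounts SSLHTTPAdapter for https://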
@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from .._import_helper import HTTPAdapter as _HTTPAdapter


class BaseHTTPAdapter(_HTTPAdapter):
    def close(self):
        super(BaseHTTPAdapter, self).close()
        if hasattr(self, 'pools'):
            self.pools.clear()

    # Hotfix for requests 2.32.0 and 2.32.1: its commit
    # https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
    # changes requests.adapters.HTTPAdapter to no longer call get_connection() from
    # send(), but instead call _get_connection().
    def _get_connection(self, request, *args, **kwargs):
        return self.get_connection(request.url, kwargs.get('proxies'))

    # Fix for requests 2.32.2+:
    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
        return self.get_connection(request.url, proxies)
@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.six.moves.queue import Empty

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection

from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket

RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class NpipeHTTPConnection(urllib3_connection.HTTPConnection, object):
    def __init__(self, npipe_path, timeout=60):
        super(NpipeHTTPConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(self, npipe_path, timeout=60, maxsize=10):
        super(NpipeHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        return NpipeHTTPConnection(
            self.npipe_path, self.timeout
        )

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            pass  # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
                                         'pools',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(NpipeHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
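# --- Editor's usage sketch (not part of the vendored file; Windows only) ---
# Mounting the adapter on a requests session routes matching URLs through the
# named pipe. The pipe path and URL scheme below follow Docker Desktop's
# defaults but are assumptions, not collection API.

import requests

session = requests.Session()
session.mount('npipe://', NpipeHTTPAdapter('npipe:////./pipe/docker_engine'))
resp = session.get('npipe://localhost/version')  # prefix matches the mount above
print(resp.json()['ApiVersion'])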
@ -0,0 +1,259 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import functools
import io
import time
import traceback

from ansible.module_utils.six import PY2

PYWIN32_IMPORT_ERROR = None
try:
    import win32file
    import win32pipe
    import pywintypes
    import win32event
    import win32api
except ImportError:
    PYWIN32_IMPORT_ERROR = traceback.format_exc()


cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0

MAXIMUM_RETRY_COUNT = 10


def check_closed(f):
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        if self._closed:
            raise RuntimeError(
                'Can not reuse socket after connection was closed.'
            )
        return f(self, *args, **kwargs)
    return wrapped


class NpipeSocket(object):
    """ Partial implementation of the socket API over windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """

    def __init__(self, handle=None):
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._closed = False

    def accept(self):
        raise NotImplementedError()

    def bind(self, address):
        raise NotImplementedError()

    def close(self):
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address, retry_count=0):
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                (cSECURITY_ANONYMOUS
                 | cSECURITY_SQOS_PRESENT
                 | win32file.FILE_FLAG_OVERLAPPED),
                0
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == cERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if (retry_count < MAXIMUM_RETRY_COUNT):
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address):
        return self.connect(address)

    @check_closed
    def detach(self):
        self._closed = True
        return self._handle

    @check_closed
    def dup(self):
        return NpipeSocket(self._handle)

    def getpeername(self):
        return self._address

    def getsockname(self):
        return self._address

    def getsockopt(self, level, optname, buflen=None):
        raise NotImplementedError()

    def ioctl(self, control, option):
        raise NotImplementedError()

    def listen(self, backlog):
        raise NotImplementedError()

    def makefile(self, mode=None, bufsize=None):
        if mode.strip('b') != 'r':
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize, flags=0):
        err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize, flags=0):
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(self, buf, nbytes=0, flags=0):
        return self.recv_into(buf, nbytes, flags), self._address

    @check_closed
    def recv_into(self, buf, nbytes=0):
        if PY2:
            return self._recv_into_py2(buf, nbytes)

        readbuf = buf
        if not isinstance(buf, memoryview):
            readbuf = memoryview(buf)

        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            err, data = win32file.ReadFile(
                self._handle,
                readbuf[:nbytes] if nbytes else readbuf,
                overlapped
            )
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    def _recv_into_py2(self, buf, nbytes):
        err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
        n = len(data)
        buf[:n] = data
        return n

    @check_closed
    def send(self, string, flags=0):
        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            win32file.WriteFile(self._handle, string, overlapped)
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def sendall(self, string, flags=0):
        return self.send(string, flags)

    @check_closed
    def sendto(self, string, address):
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag):
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value):
        if value is None:
            # Blocking mode
            self._timeout = win32event.INFINITE
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError('Timeout value out of range')
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = int(value * 1000)

    def gettimeout(self):
        return self._timeout

    def setsockopt(self, level, optname, value):
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how):
        return self.close()


class NpipeFileIOBase(io.RawIOBase):
    def __init__(self, npipe_socket):
        self.sock = npipe_socket

    def close(self):
        super(NpipeFileIOBase, self).close()
        self.sock = None

    def fileno(self):
        return self.sock.fileno()

    def isatty(self):
        return False

    def readable(self):
        return True

    def readinto(self, buf):
        return self.sock.recv_into(buf)

    def seekable(self):
        return False

    def writable(self):
        return False
@ -0,0 +1,270 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import logging
import os
import signal
import socket
import subprocess
import traceback

from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.queue import Empty
from ansible.module_utils.six.moves.urllib_parse import urlparse

from .basehttpadapter import BaseHTTPAdapter
from .. import constants

from .._import_helper import HTTPAdapter, urllib3, urllib3_connection

PARAMIKO_IMPORT_ERROR = None
try:
    import paramiko
except ImportError:
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class SSHSocket(socket.socket):
    def __init__(self, host):
        super(SSHSocket, self).__init__(
            socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        if ':' in self.host:
            self.host, self.port = self.host.split(':')
        if '@' in self.host:
            self.user, self.host = self.host.split('@')

        self.proc = None

    def connect(self, **kwargs):
        args = ['ssh']
        if self.user:
            args = args + ['-l', self.user]

        if self.port:
            args = args + ['-p', self.port]

        args = args + ['--', self.host, 'docker system dial-stdio']

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:
            def f():
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            preexec_func = f

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop('LD_LIBRARY_PATH', None)
        env.pop('SSL_CERT_FILE', None)

        self.proc = subprocess.Popen(
            args,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func)

    def _write(self, data):
        if not self.proc or self.proc.stdin.closed:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
        written = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return written

    def sendall(self, data):
        self._write(data)

    def send(self, data):
        return self._write(data)

    def recv(self, n):
        if not self.proc:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
        return self.proc.stdout.read(n)

    def makefile(self, mode):
        if not self.proc:
            self.connect()
        if PY3:
            self.proc.stdout.channel = self

        return self.proc.stdout

    def close(self):
        if not self.proc or self.proc.stdin.closed:
            return
        self.proc.stdin.write(b'\n\n')
        self.proc.stdin.flush()
        self.proc.terminate()


class SSHConnection(urllib3_connection.HTTPConnection, object):
    def __init__(self, ssh_transport=None, timeout=60, host=None):
        super(SSHConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host

    def connect(self):
        if self.ssh_transport:
            sock = self.ssh_transport.open_session()
            sock.settimeout(self.timeout)
            sock.exec_command('docker system dial-stdio')
        else:
            sock = SSHSocket(self.ssh_host)
            sock.settimeout(self.timeout)
            sock.connect()

        self.sock = sock


class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    scheme = 'ssh'

    def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
        super(SSHConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.ssh_transport = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self):
        return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            pass  # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class SSHHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + [
        'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
    ]

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
                 shell_out=False):
        self.ssh_client = None
        if not shell_out:
            self._create_paramiko_client(base_url)
            self._connect()

        self.ssh_host = base_url
        if base_url.startswith('ssh://'):
            self.ssh_host = base_url[len('ssh://'):]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(SSHHTTPAdapter, self).__init__()

    def _create_paramiko_client(self, base_url):
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url = urlparse(base_url)
        self.ssh_params = {
            "hostname": base_url.hostname,
            "port": base_url.port,
            "username": base_url.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file) as f:
                conf.parse(f)
            host_config = conf.lookup(base_url.hostname)
            if 'proxycommand' in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config['proxycommand']
                )
            if 'hostname' in host_config:
                self.ssh_params['hostname'] = host_config['hostname']
            if base_url.port is None and 'port' in host_config:
                self.ssh_params['port'] = host_config['port']
            if base_url.username is None and 'user' in host_config:
                self.ssh_params['username'] = host_config['user']
            if 'identityfile' in host_config:
                self.ssh_params['key_filename'] = host_config['identityfile']

        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self):
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(self, url, proxies=None):
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed; try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
            self.pools[url] = pool

        return pool

    def close(self):
        super(SSHHTTPAdapter, self).close()
        if self.ssh_client:
            self.ssh_client.close()
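# --- Editor's usage sketch (not part of the vendored file) ---
# Talking to a remote daemon through the paramiko-backed adapter; the host
# and user names are placeholders, and the remote SSH user must be allowed
# to run `docker system dial-stdio`.

import requests

adapter = SSHHTTPAdapter('ssh://deploy@build-host:22', timeout=60)
session = requests.Session()
session.mount('ssh://', adapter)
resp = session.get('ssh://build-host/version')
print(resp.json()['Version'])
adapter.close()  # closes the pools and the underlying paramiko client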
@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

""" Resolves OpenSSL issues in some servers:
      https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
      https://github.com/kennethreitz/requests/pull/799
"""

from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion

from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter


PoolManager = urllib3.poolmanager.PoolManager


class SSLHTTPAdapter(BaseHTTPAdapter):
    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''

    __attrs__ = HTTPAdapter.__attrs__ + ['assert_hostname', 'ssl_version']

    def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        super(SSLHTTPAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        kwargs = {
            'num_pools': connections,
            'maxsize': maxsize,
            'block': block,
        }
        if self.assert_hostname is not None:
            kwargs['assert_hostname'] = self.assert_hostname
        if self.ssl_version and self.can_override_ssl_version():
            kwargs['ssl_version'] = self.ssl_version

        self.poolmanager = PoolManager(**kwargs)

    def get_connection(self, *args, **kwargs):
        """
        Ensure assert_hostname is set correctly on our pool

        We already take care of a normal poolmanager via init_poolmanager

        But we still need to take care of when there is a proxy poolmanager
        """
        conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
        if self.assert_hostname is not None and conn.assert_hostname != self.assert_hostname:
            conn.assert_hostname = self.assert_hostname
        return conn

    def can_override_ssl_version(self):
        urllib_ver = urllib3.__version__.split('-')[0]
        if urllib_ver is None:
            return False
        if urllib_ver == 'dev':
            return True
        return LooseVersion(urllib_ver) > LooseVersion('1.5')
@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import socket

from ansible.module_utils.six import PY2

from .basehttpadapter import BaseHTTPAdapter
from .. import constants

from .._import_helper import HTTPAdapter, urllib3, urllib3_connection


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(urllib3_connection.HTTPConnection, object):

    def __init__(self, base_url, unix_socket, timeout=60):
        super(UnixHTTPConnection, self).__init__(
            'localhost', timeout=timeout
        )
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock

    def putheader(self, header, *values):
        super(UnixHTTPConnection, self).putheader(header, *values)
        if header == 'Connection' and 'Upgrade' in values:
            self.disable_buffering = True

    def response_class(self, sock, *args, **kwargs):
        if PY2:
            # FIXME: We may need to disable buffering on Py3 as well,
            # but there's no clear way to do it at the moment. See:
            # https://github.com/docker/docker-py/issues/1799
            kwargs['buffering'] = not self.disable_buffering

        return super(UnixHTTPConnection, self).response_class(sock, *args, **kwargs)


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
        super(UnixHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        return UnixHTTPConnection(
            self.base_url, self.socket_path, self.timeout
        )


class UnixHTTPAdapter(BaseHTTPAdapter):

    __attrs__ = HTTPAdapter.__attrs__ + ['pools',
                                         'socket_path',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, socket_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        socket_path = socket_url.replace('http+unix://', '')
        if not socket_path.startswith('/'):
            socket_path = '/' + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(UnixHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
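# --- Editor's usage sketch (not part of the vendored file) ---
# Querying the local daemon over the default UNIX socket; the socket path is
# the stock Linux default and may differ on your system.

import requests

session = requests.Session()
session.mount('http+unix://', UnixHTTPAdapter('http+unix:///var/run/docker.sock'))
resp = session.get('http+unix://localhost/version')  # host part is ignored by the adapter
print(resp.json()['ApiVersion'])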
@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import socket

from .._import_helper import urllib3

from ..errors import DockerException


class CancellableStream(object):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream, response):
        self._stream = stream
        self._response = response

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError:
            raise StopIteration
        except socket.error:
            raise StopIteration

    next = __next__

    def close(self):
        """
        Closes the event streaming.
        """

        if not self._response.raw.closed:
            # find the underlying socket object
            # based on api.client._get_raw_response_socket

            sock_fp = self._response.raw._fp.fp

            if hasattr(sock_fp, 'raw'):
                sock_raw = sock_fp.raw

                if hasattr(sock_raw, 'sock'):
                    sock = sock_raw.sock

                elif hasattr(sock_raw, '_sock'):
                    sock = sock_raw._sock

            elif hasattr(sock_fp, 'channel'):
                # We are working with a paramiko (SSH) channel, which does not
                # support cancelable streams with the current implementation
                raise DockerException(
                    'Cancellable streams not supported for the SSH protocol'
                )
            else:
                sock = sock_fp._sock

            if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
                    sock, urllib3.contrib.pyopenssl.WrappedSocket):
                sock = sock.socket

            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
@ -0,0 +1,305 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import io
import os
import random
import re
import tarfile
import tempfile

from ansible.module_utils.six import PY3

from . import fnmatch
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX


_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')


def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
    root = os.path.abspath(path)
    exclude = exclude or []
    dockerfile = dockerfile or (None, None)
    extra_files = []
    if dockerfile[1] is not None:
        dockerignore_contents = '\n'.join(
            (exclude or ['.dockerignore']) + [dockerfile[0]]
        )
        extra_files = [
            ('.dockerignore', dockerignore_contents),
            dockerfile,
        ]
    return create_archive(
        files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
        root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
    )


def exclude_paths(root, patterns, dockerfile=None):
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """

    if dockerfile is None:
        dockerfile = 'Dockerfile'

    patterns.append('!' + dockerfile)
    pm = PatternMatcher(patterns)
    return set(pm.walk(root))


def build_file_list(root):
    files = []
    for dirname, dirnames, fnames in os.walk(root):
        for filename in fnames + dirnames:
            longpath = os.path.join(dirname, filename)
            files.append(
                longpath.replace(root, '', 1).lstrip('/')
            )

    return files


def create_archive(root, files=None, fileobj=None, gzip=False,
                   extra_files=None):
    extra_files = extra_files or []
    if not fileobj:
        fileobj = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
    if files is None:
        files = build_file_list(root)
    extra_names = set(e[0] for e in extra_files)
    for path in files:
        if path in extra_names:
            # Extra files override context files with the same name
            continue
        full_path = os.path.join(root, path)

        i = t.gettarinfo(full_path, arcname=path)
        if i is None:
            # This happens when we encounter a socket file. We can safely
            # ignore it and proceed.
            continue

        # Workaround https://bugs.python.org/issue32713
        if i.mtime < 0 or i.mtime > 8**11 - 1:
            i.mtime = int(i.mtime)

        if IS_WINDOWS_PLATFORM:
            # Windows does not keep track of the execute bit, so we make files
            # and directories executable by default.
            i.mode = i.mode & 0o755 | 0o111

        if i.isfile():
            try:
                with open(full_path, 'rb') as f:
                    t.addfile(i, f)
            except IOError:
                raise IOError(
                    'Can not read file in context: {0}'.format(full_path)
                )
        else:
            # Directories, FIFOs, symlinks... do not need to be read.
            t.addfile(i, None)

    for name, contents in extra_files:
        info = tarfile.TarInfo(name)
        contents_encoded = contents.encode('utf-8')
        info.size = len(contents_encoded)
        t.addfile(info, io.BytesIO(contents_encoded))

    t.close()
    fileobj.seek(0)
    return fileobj


def mkbuildcontext(dockerfile):
    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode='w', fileobj=f)
    if isinstance(dockerfile, io.StringIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        if PY3:
            raise TypeError('Please use io.BytesIO to create in-memory '
                            'Dockerfiles with Python 3')
        else:
            dfinfo.size = len(dockerfile.getvalue())
            dockerfile.seek(0)
    elif isinstance(dockerfile, io.BytesIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        dfinfo.size = len(dockerfile.getvalue())
        dockerfile.seek(0)
    else:
        dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
    t.addfile(dfinfo, dockerfile)
    t.close()
    f.seek(0)
    return f


def split_path(p):
    return [pt for pt in re.split(_SEP, p) if pt and pt != '.']


def normalize_slashes(p):
    if IS_WINDOWS_PLATFORM:
        return '/'.join(split_path(p))
    return p


def walk(root, patterns, default=True):
    pm = PatternMatcher(patterns)
    return pm.walk(root)


# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher(object):
    def __init__(self, patterns):
        self.patterns = list(filter(
            lambda p: p.dirs, [Pattern(p) for p in patterns]
        ))
        self.patterns.append(Pattern('!.dockerignore'))

    def matches(self, filepath):
        matched = False
        parent_path = os.path.dirname(filepath)
        parent_path_dirs = split_path(parent_path)

        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if not match and parent_path != '':
                if len(pattern.dirs) <= len(parent_path_dirs):
                    match = pattern.match(
                        os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
                    )

            if match:
                matched = not negative

        return matched

    def walk(self, root):
        def rec_walk(current_dir):
            for f in os.listdir(current_dir):
                fpath = os.path.join(
                    os.path.relpath(current_dir, root), f
                )
                if fpath.startswith('.' + os.path.sep):
                    fpath = fpath[2:]
                match = self.matches(fpath)
                if not match:
                    yield fpath

                cur = os.path.join(root, fpath)
                if not os.path.isdir(cur) or os.path.islink(cur):
                    continue

                if match:
                    # If we want to skip this file and it is a directory
                    # then we should first check to see if there's an
                    # excludes pattern (e.g. !dir/file) that starts with this
                    # dir. If so then we cannot skip this dir.
                    skip = True

                    for pat in self.patterns:
                        if not pat.exclusion:
                            continue
                        if pat.cleaned_pattern.startswith(
                                normalize_slashes(fpath)):
                            skip = False
                            break
                    if skip:
                        continue
                for sub in rec_walk(cur):
                    yield sub

        return rec_walk(root)


class Pattern(object):
    def __init__(self, pattern_str):
        self.exclusion = False
        if pattern_str.startswith('!'):
            self.exclusion = True
            pattern_str = pattern_str[1:]

        self.dirs = self.normalize(pattern_str)
        self.cleaned_pattern = '/'.join(self.dirs)

    @classmethod
    def normalize(cls, p):

        # Remove trailing spaces
        p = p.strip()

        # Leading and trailing slashes are not relevant. Yes,
        # "foo.py/" must exclude the "foo.py" regular file. "."
        # components are not relevant either, even if the whole
        # pattern is only ".", as the Docker reference states: "For
        # historical reasons, the pattern . is ignored."
        # ".." component must be cleared with the potential previous
        # component, regardless of whether it exists: "A preprocessing
        # step [...] eliminates . and .. elements using Go's
        # filepath.".
        i = 0
        split = split_path(p)
        while i < len(split):
            if split[i] == '..':
                del split[i]
                if i > 0:
                    del split[i - 1]
                    i -= 1
            else:
                i += 1
        return split

    def match(self, filepath):
        return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)


def process_dockerfile(dockerfile, path):
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if IS_WINDOWS_PLATFORM and path.startswith(
                WINDOWS_LONGPATH_PREFIX):
            abs_dockerfile = '{0}{1}'.format(
                WINDOWS_LONGPATH_PREFIX,
                os.path.normpath(
                    abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):]
                )
            )
    if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
            os.path.relpath(abs_dockerfile, path).startswith('..')):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile) as df:
            return (
                '.dockerfile.{random:x}'.format(random=random.getrandbits(160)),
                df.read()
            )

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
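# --- Editor's usage sketch (not part of the vendored file) ---
# Building a tar stream of a build context while honoring .dockerignore-style
# patterns; the directory path is a placeholder and must exist.

context = tar('/tmp/mybuild', exclude=['.git', '*.pyc'], gzip=True)
# `context` is a seekable file object ready to stream to the daemon's build
# endpoint; exclude_paths() can also be used on its own:
kept = exclude_paths('/tmp/mybuild', ['*.log', '!important.log'])
print(sorted(kept))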
@ -0,0 +1,83 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from ..constants import IS_WINDOWS_PLATFORM
|
||||
|
||||
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
|
||||
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_default_config_file():
|
||||
return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)
|
||||
|
||||
|
||||
def find_config_file(config_path=None):
|
||||
homedir = home_dir()
|
||||
paths = list(filter(None, [
|
||||
config_path, # 1
|
||||
config_path_from_environment(), # 2
|
||||
os.path.join(homedir, DOCKER_CONFIG_FILENAME), # 3
|
||||
os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME), # 4
|
||||
]))
|
||||
|
||||
log.debug("Trying paths: %s", repr(paths))
|
||||
|
||||
for path in paths:
|
||||
if os.path.exists(path):
|
||||
log.debug("Found file at path: %s", path)
|
||||
return path
|
||||
|
||||
log.debug("No config file found")
|
||||
|
||||
return None
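
The numbered candidates above encode the precedence: an explicit `config_path` argument beats `$DOCKER_CONFIG`, which beats the per-user files. A minimal sketch of that lookup, using a hypothetical temporary directory:

```python
import json
import os
import tempfile

# Hypothetical config dir; DOCKER_CONFIG names a directory, and the
# client appends the standard config.json file name to it.
config_dir = tempfile.mkdtemp()
with open(os.path.join(config_dir, 'config.json'), 'w') as f:
    json.dump({'HttpHeaders': {'X-Demo': '1'}}, f)

os.environ['DOCKER_CONFIG'] = config_dir
# No explicit path is passed, so candidate 2 (the environment) wins,
# provided the file actually exists on disk.
assert find_config_file() == os.path.join(config_dir, 'config.json')
```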
|
||||
|
||||
|
||||
def config_path_from_environment():
|
||||
config_dir = os.environ.get('DOCKER_CONFIG')
|
||||
if not config_dir:
|
||||
return None
|
||||
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
|
||||
|
||||
|
||||
def home_dir():
|
||||
"""
|
||||
Get the user's home directory, using the same logic as the Docker Engine
|
||||
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
|
||||
"""
|
||||
if IS_WINDOWS_PLATFORM:
|
||||
return os.environ.get('USERPROFILE', '')
|
||||
else:
|
||||
return os.path.expanduser('~')
|
||||
|
||||
|
||||
def load_general_config(config_path=None):
|
||||
config_file = find_config_file(config_path)
|
||||
|
||||
if not config_file:
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(config_file) as f:
|
||||
return json.load(f)
|
||||
except (IOError, ValueError) as e:
|
||||
# In the case of a legacy `.dockercfg` file, we will not
|
||||
# be able to load any JSON data.
|
||||
log.debug(e)
|
||||
|
||||
log.debug("All parsing attempts failed - returning empty config")
|
||||
return {}
|
@@ -0,0 +1,59 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import functools
|
||||
|
||||
from .. import errors
|
||||
from . import utils
|
||||
|
||||
|
||||
def check_resource(resource_name):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def wrapped(self, resource_id=None, *args, **kwargs):
|
||||
if resource_id is None and kwargs.get(resource_name):
|
||||
resource_id = kwargs.pop(resource_name)
|
||||
if isinstance(resource_id, dict):
|
||||
resource_id = resource_id.get('Id', resource_id.get('ID'))
|
||||
if not resource_id:
|
||||
raise errors.NullResource(
|
||||
'Resource ID was not provided'
|
||||
)
|
||||
return f(self, resource_id, *args, **kwargs)
|
||||
return wrapped
|
||||
return decorator
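
To see what the decorator buys its callers, here is a sketch with a hypothetical `DummyClient`: the resource id may arrive positionally, as a keyword, or as an inspect-style dict with an `Id` key, and the wrapped method always receives the plain id:

```python
class DummyClient(object):
    _version = '1.41'  # what minimum_version() (below) would inspect

    @check_resource('container')
    def inspect_container(self, container):
        return container

client = DummyClient()
assert client.inspect_container('abc123') == 'abc123'
assert client.inspect_container(container='abc123') == 'abc123'
assert client.inspect_container({'Id': 'abc123'}) == 'abc123'
```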
|
||||
|
||||
|
||||
def minimum_version(version):
|
||||
def decorator(f):
|
||||
@functools.wraps(f)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
if utils.version_lt(self._version, version):
|
||||
raise errors.InvalidVersion(
|
||||
'{0} is not available for version < {1}'.format(
|
||||
f.__name__, version
|
||||
)
|
||||
)
|
||||
return f(self, *args, **kwargs)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def update_headers(f):
|
||||
def inner(self, *args, **kwargs):
|
||||
if 'HttpHeaders' in self._general_configs:
|
||||
if not kwargs.get('headers'):
|
||||
kwargs['headers'] = self._general_configs['HttpHeaders']
|
||||
else:
|
||||
kwargs['headers'].update(self._general_configs['HttpHeaders'])
|
||||
return f(self, *args, **kwargs)
|
||||
return inner
|
@@ -0,0 +1,127 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
"""Filename matching with shell patterns.
|
||||
|
||||
fnmatch(FILENAME, PATTERN) matches according to the local convention.
|
||||
fnmatchcase(FILENAME, PATTERN) always takes case into account.
|
||||
|
||||
The functions operate by translating the pattern into a regular
|
||||
expression. They cache the compiled regular expressions for speed.
|
||||
|
||||
The function translate(PATTERN) returns a regular expression
|
||||
corresponding to PATTERN. (It does not compile it.)
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
__all__ = ["fnmatch", "fnmatchcase", "translate"]
|
||||
|
||||
_cache = {}
|
||||
_MAXCACHE = 100
|
||||
|
||||
|
||||
def _purge():
|
||||
"""Clear the pattern cache"""
|
||||
_cache.clear()
|
||||
|
||||
|
||||
def fnmatch(name, pat):
|
||||
"""Test whether FILENAME matches PATTERN.
|
||||
|
||||
Patterns are Unix shell style:
|
||||
|
||||
* matches everything
|
||||
? matches any single character
|
||||
[seq] matches any character in seq
|
||||
[!seq] matches any char not in seq
|
||||
|
||||
An initial period in FILENAME is not special.
|
||||
Both FILENAME and PATTERN are first case-normalized
|
||||
if the operating system requires it.
|
||||
If you do not want this, use fnmatchcase(FILENAME, PATTERN).
|
||||
"""
|
||||
|
||||
name = name.lower()
|
||||
pat = pat.lower()
|
||||
return fnmatchcase(name, pat)
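
Unlike the standard library, this copy lowercases unconditionally, so `fnmatch` is case-insensitive on every platform while `fnmatchcase` stays exact. A quick sketch:

```python
assert fnmatch('Dockerfile', 'dockerfile')          # lowercased on both sides
assert not fnmatchcase('Dockerfile', 'dockerfile')  # exact case required
assert fnmatchcase('Dockerfile', 'Docker*')         # '*' still matches within a segment
```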
|
||||
|
||||
|
||||
def fnmatchcase(name, pat):
|
||||
"""Test whether FILENAME matches PATTERN, including case.
|
||||
This is a version of fnmatch() which does not case-normalize
|
||||
its arguments.
|
||||
"""
|
||||
|
||||
try:
|
||||
re_pat = _cache[pat]
|
||||
except KeyError:
|
||||
res = translate(pat)
|
||||
if len(_cache) >= _MAXCACHE:
|
||||
_cache.clear()
|
||||
_cache[pat] = re_pat = re.compile(res)
|
||||
return re_pat.match(name) is not None
|
||||
|
||||
|
||||
def translate(pat):
|
||||
"""Translate a shell PATTERN to a regular expression.
|
||||
|
||||
There is no way to quote meta-characters.
|
||||
"""
|
||||
i, n = 0, len(pat)
|
||||
res = '^'
|
||||
while i < n:
|
||||
c = pat[i]
|
||||
i = i + 1
|
||||
if c == '*':
|
||||
if i < n and pat[i] == '*':
|
||||
# is some flavor of "**"
|
||||
i = i + 1
|
||||
# Treat **/ as ** so eat the "/"
|
||||
if i < n and pat[i] == '/':
|
||||
i = i + 1
|
||||
if i >= n:
|
||||
# is "**EOF" - to align with .gitignore just accept all
|
||||
res = res + '.*'
|
||||
else:
|
||||
# is "**"
|
||||
# Note that this allows for any # of /'s (even 0) because
|
||||
# the .* will eat everything, even /'s
|
||||
res = res + '(.*/)?'
|
||||
else:
|
||||
# is "*" so map it to anything but "/"
|
||||
res = res + '[^/]*'
|
||||
elif c == '?':
|
||||
# "?" is any char except "/"
|
||||
res = res + '[^/]'
|
||||
elif c == '[':
|
||||
j = i
|
||||
if j < n and pat[j] == '!':
|
||||
j = j + 1
|
||||
if j < n and pat[j] == ']':
|
||||
j = j + 1
|
||||
while j < n and pat[j] != ']':
|
||||
j = j + 1
|
||||
if j >= n:
|
||||
res = res + '\\['
|
||||
else:
|
||||
stuff = pat[i:j].replace('\\', '\\\\')
|
||||
i = j + 1
|
||||
if stuff[0] == '!':
|
||||
stuff = '^' + stuff[1:]
|
||||
elif stuff[0] == '^':
|
||||
stuff = '\\' + stuff
|
||||
res = '%s[%s]' % (res, stuff)
|
||||
else:
|
||||
res = res + re.escape(c)
|
||||
|
||||
return res + '$'
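
The `**` handling is the part that differs from plain shell globbing: `**/` may match zero directories, a lone trailing `**` accepts everything underneath (as in `.gitignore`), and a single `*` never crosses a `/`. A few checks against `fnmatch`, which is driven by this `translate`:

```python
# '*' becomes [^/]* and stays inside one path segment:
assert fnmatch('error.log', '*.log')
assert not fnmatch('sub/error.log', '*.log')
# '**/' becomes (.*/)? and therefore also matches zero directories:
assert fnmatch('foo', '**/foo')
assert fnmatch('a/b/foo', '**/foo')
# a trailing '**' accepts anything underneath:
assert fnmatch('docs/a/b.md', 'docs/**')
```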
|
@@ -0,0 +1,89 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import json.decoder
|
||||
|
||||
from ansible.module_utils.six import text_type
|
||||
|
||||
from ..errors import StreamParseError
|
||||
|
||||
|
||||
json_decoder = json.JSONDecoder()
|
||||
|
||||
|
||||
def stream_as_text(stream):
|
||||
"""
|
||||
Given a stream of bytes or text, if any of the items in the stream
|
||||
are bytes convert them to text.
|
||||
This function can be removed once we return text streams
|
||||
instead of byte streams.
|
||||
"""
|
||||
for data in stream:
|
||||
if not isinstance(data, text_type):
|
||||
data = data.decode('utf-8', 'replace')
|
||||
yield data
|
||||
|
||||
|
||||
def json_splitter(buffer):
|
||||
"""Attempt to parse a json object from a buffer. If there is at least one
|
||||
object, return it and the rest of the buffer, otherwise return None.
|
||||
"""
|
||||
buffer = buffer.strip()
|
||||
try:
|
||||
obj, index = json_decoder.raw_decode(buffer)
|
||||
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
|
||||
return obj, rest
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def json_stream(stream):
|
||||
"""Given a stream of text, return a stream of json objects.
|
||||
This handles streams which are inconsistently buffered (some entries may
|
||||
be newline delimited, and others are not).
|
||||
"""
|
||||
return split_buffer(stream, json_splitter, json_decoder.decode)
|
||||
|
||||
|
||||
def line_splitter(buffer, separator=u'\n'):
|
||||
index = buffer.find(text_type(separator))
|
||||
if index == -1:
|
||||
return None
|
||||
return buffer[:index + 1], buffer[index + 1:]
|
||||
|
||||
|
||||
def split_buffer(stream, splitter=None, decoder=lambda a: a):
|
||||
"""Given a generator which yields strings and a splitter function,
|
||||
joins all input, splits on the separator and yields each chunk.
|
||||
Unlike string.split(), each chunk includes the trailing
|
||||
separator, except for the last one if none was found on the end
|
||||
of the input.
|
||||
"""
|
||||
splitter = splitter or line_splitter
|
||||
buffered = text_type('')
|
||||
|
||||
for data in stream_as_text(stream):
|
||||
buffered += data
|
||||
while True:
|
||||
buffer_split = splitter(buffered)
|
||||
if buffer_split is None:
|
||||
break
|
||||
|
||||
item, buffered = buffer_split
|
||||
yield item
|
||||
|
||||
if buffered:
|
||||
try:
|
||||
yield decoder(buffered)
|
||||
except Exception as e:
|
||||
raise StreamParseError(e)
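
To make the buffering behaviour concrete, the sketch below feeds `json_stream` two JSON objects split across three uneven chunks, the way a Docker API response can arrive off the wire; the splitter reassembles them regardless of where the chunk boundaries fall:

```python
chunks = [b'{"status": "pulling"}\n{"sta', b'tus": "do', b'ne"}']
assert list(json_stream(chunks)) == [
    {'status': 'pulling'},
    {'status': 'done'},
]
```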
|
@@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import re
|
||||
|
||||
PORT_SPEC = re.compile(
|
||||
"^" # Match full string
|
||||
"(" # External part
|
||||
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
|
||||
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
|
||||
")?"
|
||||
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
|
||||
"(?P<proto>/(udp|tcp|sctp))?" # Protocol
|
||||
"$" # Match full string
|
||||
)
|
||||
|
||||
|
||||
def add_port_mapping(port_bindings, internal_port, external):
|
||||
if internal_port in port_bindings:
|
||||
port_bindings[internal_port].append(external)
|
||||
else:
|
||||
port_bindings[internal_port] = [external]
|
||||
|
||||
|
||||
def add_port(port_bindings, internal_port_range, external_range):
|
||||
if external_range is None:
|
||||
for internal_port in internal_port_range:
|
||||
add_port_mapping(port_bindings, internal_port, None)
|
||||
else:
|
||||
ports = zip(internal_port_range, external_range)
|
||||
for internal_port, external_port in ports:
|
||||
add_port_mapping(port_bindings, internal_port, external_port)
|
||||
|
||||
|
||||
def build_port_bindings(ports):
|
||||
port_bindings = {}
|
||||
for port in ports:
|
||||
internal_port_range, external_range = split_port(port)
|
||||
add_port(port_bindings, internal_port_range, external_range)
|
||||
return port_bindings
|
||||
|
||||
|
||||
def _raise_invalid_port(port):
|
||||
raise ValueError('Invalid port "%s", should be '
|
||||
'[[remote_ip:]remote_port[-remote_port]:]'
|
||||
'port[/protocol]' % port)
|
||||
|
||||
|
||||
def port_range(start, end, proto, randomly_available_port=False):
|
||||
if not start:
|
||||
return start
|
||||
if not end:
|
||||
return [start + proto]
|
||||
if randomly_available_port:
|
||||
return ['{0}-{1}'.format(start, end) + proto]
|
||||
return [str(port) + proto for port in range(int(start), int(end) + 1)]
|
||||
|
||||
|
||||
def split_port(port):
|
||||
if hasattr(port, 'legacy_repr'):
|
||||
# This is the worst hack, but it prevents a bug in Compose 1.14.0
|
||||
# https://github.com/docker/docker-py/issues/1668
|
||||
# TODO: remove once fixed in Compose stable
|
||||
port = port.legacy_repr()
|
||||
port = str(port)
|
||||
match = PORT_SPEC.match(port)
|
||||
if match is None:
|
||||
_raise_invalid_port(port)
|
||||
parts = match.groupdict()
|
||||
|
||||
host = parts['host']
|
||||
proto = parts['proto'] or ''
|
||||
internal = port_range(parts['int'], parts['int_end'], proto)
|
||||
external = port_range(
|
||||
parts['ext'], parts['ext_end'], '', len(internal) == 1)
|
||||
|
||||
if host is None:
|
||||
if external is not None and len(internal) != len(external):
|
||||
raise ValueError('Port ranges don\'t match in length')
|
||||
return internal, external
|
||||
else:
|
||||
if not external:
|
||||
external = [None] * len(internal)
|
||||
elif len(internal) != len(external):
|
||||
raise ValueError('Port ranges don\'t match in length')
|
||||
return internal, [(host, ext_port) for ext_port in external]
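
A few concrete inputs show the `(internal, external)` shapes the parser produces; the syntax is the same `[[remote_ip:]remote_port[-remote_port]:]port[/protocol]` that the error message above spells out:

```python
# container port only -> no host binding at all
assert split_port('80/tcp') == (['80/tcp'], None)
# host ip, host port and container port
assert split_port('127.0.0.1:8080:80') == (['80'], [('127.0.0.1', '8080')])
# matching ranges are expanded and zipped pairwise
assert split_port('8080-8081:80-81') == (['80', '81'], ['8080', '8081'])
```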
|
@@ -0,0 +1,85 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from .utils import format_environment
|
||||
|
||||
|
||||
class ProxyConfig(dict):
|
||||
'''
|
||||
Hold the client's proxy configuration
|
||||
'''
|
||||
@property
|
||||
def http(self):
|
||||
return self.get('http')
|
||||
|
||||
@property
|
||||
def https(self):
|
||||
return self.get('https')
|
||||
|
||||
@property
|
||||
def ftp(self):
|
||||
return self.get('ftp')
|
||||
|
||||
@property
|
||||
def no_proxy(self):
|
||||
return self.get('no_proxy')
|
||||
|
||||
@staticmethod
|
||||
def from_dict(config):
|
||||
'''
|
||||
Instantiate a new ProxyConfig from a dictionary that represents a
|
||||
client configuration, as described in `the documentation`_.
|
||||
|
||||
.. _the documentation:
|
||||
https://docs.docker.com/network/proxy/#configure-the-docker-client
|
||||
'''
|
||||
return ProxyConfig(
|
||||
http=config.get('httpProxy'),
|
||||
https=config.get('httpsProxy'),
|
||||
ftp=config.get('ftpProxy'),
|
||||
no_proxy=config.get('noProxy'),
|
||||
)
|
||||
|
||||
def get_environment(self):
|
||||
'''
|
||||
Return a dictionary representing the environment variables used to
|
||||
set the proxy settings.
|
||||
'''
|
||||
env = {}
|
||||
if self.http:
|
||||
env['http_proxy'] = env['HTTP_PROXY'] = self.http
|
||||
if self.https:
|
||||
env['https_proxy'] = env['HTTPS_PROXY'] = self.https
|
||||
if self.ftp:
|
||||
env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
|
||||
if self.no_proxy:
|
||||
env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
|
||||
return env
|
||||
|
||||
def inject_proxy_environment(self, environment):
|
||||
'''
|
||||
Given a list of strings representing environment variables, prepend the
|
||||
environment variables corresponding to the proxy settings.
|
||||
'''
|
||||
if not self:
|
||||
return environment
|
||||
|
||||
proxy_env = format_environment(self.get_environment())
|
||||
if not environment:
|
||||
return proxy_env
|
||||
# It is important to prepend our variables, because we want the
|
||||
# variables defined in "environment" to take precedence.
|
||||
return proxy_env + environment
|
||||
|
||||
def __str__(self):
|
||||
return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format(
|
||||
self.http, self.https, self.ftp, self.no_proxy)
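
Putting the class together: `from_dict` consumes the camel-cased keys Docker uses under `proxies` in `config.json`, and `inject_proxy_environment` prepends the derived variables so that values supplied by the caller still win. A minimal sketch (the proxy endpoints are hypothetical):

```python
cfg = ProxyConfig.from_dict({
    'httpProxy': 'http://proxy:3128',  # hypothetical proxy endpoint
    'noProxy': 'localhost',
})
env = cfg.inject_proxy_environment(['HTTP_PROXY=http://override:80'])
# The caller's entry comes last, so it takes precedence:
assert env[-1] == 'HTTP_PROXY=http://override:80'
assert 'no_proxy=localhost' in env
```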
|
@@ -0,0 +1,199 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import errno
|
||||
import os
|
||||
import select
|
||||
import socket as pysocket
|
||||
import struct
|
||||
|
||||
from ansible.module_utils.six import PY3, binary_type
|
||||
|
||||
from ..transport.npipesocket import NpipeSocket
|
||||
|
||||
|
||||
STDOUT = 1
|
||||
STDERR = 2
|
||||
|
||||
|
||||
class SocketError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
# NpipeSockets have their own error types
|
||||
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
|
||||
NPIPE_ENDED = 109
|
||||
|
||||
|
||||
def read(socket, n=4096):
|
||||
"""
|
||||
Reads at most n bytes from socket
|
||||
"""
|
||||
|
||||
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
|
||||
|
||||
if PY3 and not isinstance(socket, NpipeSocket):
|
||||
if not hasattr(select, "poll"):
|
||||
# select() is limited to 1024 file descriptors
|
||||
select.select([socket], [], [])
|
||||
else:
|
||||
poll = select.poll()
|
||||
poll.register(socket, select.POLLIN | select.POLLPRI)
|
||||
poll.poll()
|
||||
|
||||
try:
|
||||
if hasattr(socket, 'recv'):
|
||||
return socket.recv(n)
|
||||
if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
|
||||
return socket.read(n)
|
||||
return os.read(socket.fileno(), n)
|
||||
except EnvironmentError as e:
|
||||
if e.errno not in recoverable_errors:
|
||||
raise
|
||||
except Exception as e:
|
||||
is_pipe_ended = (isinstance(socket, NpipeSocket) and
|
||||
len(e.args) > 0 and
|
||||
e.args[0] == NPIPE_ENDED)
|
||||
if is_pipe_ended:
|
||||
# npipes do not support duplex sockets, so we interpret
|
||||
# a PIPE_ENDED error as a close operation (0-length read).
|
||||
return ''
|
||||
raise
|
||||
|
||||
|
||||
def read_exactly(socket, n):
|
||||
"""
|
||||
Reads exactly n bytes from socket
|
||||
Raises SocketError if there is not enough data
|
||||
"""
|
||||
data = binary_type()
|
||||
while len(data) < n:
|
||||
next_data = read(socket, n - len(data))
|
||||
if not next_data:
|
||||
raise SocketError("Unexpected EOF")
|
||||
data += next_data
|
||||
return data
|
||||
|
||||
|
||||
def next_frame_header(socket):
|
||||
"""
|
||||
Returns the stream and size of the next frame of data waiting to be read
|
||||
from socket, according to the protocol defined here:
|
||||
|
||||
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
|
||||
"""
|
||||
try:
|
||||
data = read_exactly(socket, 8)
|
||||
except SocketError:
|
||||
return (-1, -1)
|
||||
|
||||
stream, actual = struct.unpack('>BxxxL', data)
|
||||
return (stream, actual)
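
The `>BxxxL` format mirrors the multiplexing header of the attach protocol: one byte of stream id, three bytes of padding, then a big-endian 32-bit payload length. A self-contained sketch, with a POSIX `socketpair` standing in for a real attach socket:

```python
import socket as pysocket
import struct

a, b = pysocket.socketpair()  # stand-in for a Docker attach socket
payload = b'hello\n'
# stream id 1 (STDOUT), three pad bytes, big-endian length, then payload:
a.sendall(struct.pack('>BxxxL', STDOUT, len(payload)) + payload)
a.close()

stream, size = next_frame_header(b)
assert (stream, size) == (STDOUT, len(payload))
assert read_exactly(b, size) == payload
```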
|
||||
|
||||
|
||||
def frames_iter(socket, tty):
|
||||
"""
|
||||
Return a generator of frames read from socket. A frame is a tuple where
|
||||
the first item is the stream number and the second item is a chunk of data.
|
||||
|
||||
If the tty setting is enabled, the streams are multiplexed into the stdout
|
||||
stream.
|
||||
"""
|
||||
if tty:
|
||||
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
|
||||
else:
|
||||
return frames_iter_no_tty(socket)
|
||||
|
||||
|
||||
def frames_iter_no_tty(socket):
|
||||
"""
|
||||
Returns a generator of data read from the socket when the tty setting is
|
||||
not enabled.
|
||||
"""
|
||||
while True:
|
||||
(stream, n) = next_frame_header(socket)
|
||||
if n < 0:
|
||||
break
|
||||
while n > 0:
|
||||
result = read(socket, n)
|
||||
if result is None:
|
||||
continue
|
||||
data_length = len(result)
|
||||
if data_length == 0:
|
||||
# We have reached EOF
|
||||
return
|
||||
n -= data_length
|
||||
yield (stream, result)
|
||||
|
||||
|
||||
def frames_iter_tty(socket):
|
||||
"""
|
||||
Return a generator of data read from the socket when the tty setting is
|
||||
enabled.
|
||||
"""
|
||||
while True:
|
||||
result = read(socket)
|
||||
if len(result) == 0:
|
||||
# We have reached EOF
|
||||
return
|
||||
yield result
|
||||
|
||||
|
||||
def consume_socket_output(frames, demux=False):
|
||||
"""
|
||||
Iterate through frames read from the socket and return the result.
|
||||
|
||||
Args:
|
||||
|
||||
demux (bool):
|
||||
If False, stdout and stderr are multiplexed, and the result is the
|
||||
concatenation of all the frames. If True, the streams are
|
||||
demultiplexed, and the result is a 2-tuple where each item is the
|
||||
concatenation of frames belonging to the same stream.
|
||||
"""
|
||||
if demux is False:
|
||||
# If the streams are multiplexed, the generator returns strings, that
|
||||
# we just need to concatenate.
|
||||
return binary_type().join(frames)
|
||||
|
||||
# If the streams are demultiplexed, the generator yields tuples
|
||||
# (stdout, stderr)
|
||||
out = [None, None]
|
||||
for frame in frames:
|
||||
# It is guaranteed that for each frame, one and only one stream
|
||||
# is not None.
|
||||
if frame == (None, None):
|
||||
raise AssertionError('frame must not be (None, None), but got %s' % (frame, ))
|
||||
if frame[0] is not None:
|
||||
if out[0] is None:
|
||||
out[0] = frame[0]
|
||||
else:
|
||||
out[0] += frame[0]
|
||||
else:
|
||||
if out[1] is None:
|
||||
out[1] = frame[1]
|
||||
else:
|
||||
out[1] += frame[1]
|
||||
return tuple(out)
|
||||
|
||||
|
||||
def demux_adaptor(stream_id, data):
|
||||
"""
|
||||
Utility to demultiplex stdout and stderr when reading frames from the
|
||||
socket.
|
||||
"""
|
||||
if stream_id == STDOUT:
|
||||
return (data, None)
|
||||
elif stream_id == STDERR:
|
||||
return (None, data)
|
||||
else:
|
||||
raise ValueError('{0} is not a valid stream'.format(stream_id))
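
The adaptor and `consume_socket_output` compose like this; the frames below are hypothetical values of the kind `frames_iter` yields when `tty` is disabled:

```python
frames = [(STDOUT, b'out1'), (STDERR, b'err1'), (STDOUT, b'out2')]

# Multiplexed: payloads are simply concatenated in arrival order.
merged = consume_socket_output((data for dummy, data in frames), demux=False)
assert merged == b'out1err1out2'

# Demultiplexed: map each frame to a (stdout, stderr) tuple first.
demuxed = (demux_adaptor(*frame) for frame in frames)
assert consume_socket_output(demuxed, demux=True) == (b'out1out2', b'err1')
```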
|
@@ -0,0 +1,528 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import base64
|
||||
import collections
|
||||
import json
|
||||
import os
|
||||
import os.path
|
||||
import shlex
|
||||
import string
|
||||
from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
|
||||
|
||||
from ansible.module_utils.six import PY2, PY3, binary_type, integer_types, iteritems, string_types, text_type
|
||||
|
||||
from .. import errors
|
||||
from ..constants import DEFAULT_HTTP_HOST
|
||||
from ..constants import DEFAULT_UNIX_SOCKET
|
||||
from ..constants import DEFAULT_NPIPE
|
||||
from ..constants import BYTE_UNITS
|
||||
from ..tls import TLSConfig
|
||||
|
||||
if PY2:
|
||||
from urlparse import urlparse, urlunparse
|
||||
else:
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
|
||||
|
||||
URLComponents = collections.namedtuple(
|
||||
'URLComponents',
|
||||
'scheme netloc url params query fragment',
|
||||
)
|
||||
|
||||
|
||||
def create_ipam_pool(*args, **kwargs):
|
||||
raise errors.DeprecatedMethod(
|
||||
'utils.create_ipam_pool has been removed. Please use a '
|
||||
'docker.types.IPAMPool object instead.'
|
||||
)
|
||||
|
||||
|
||||
def create_ipam_config(*args, **kwargs):
|
||||
raise errors.DeprecatedMethod(
|
||||
'utils.create_ipam_config has been removed. Please use a '
|
||||
'docker.types.IPAMConfig object instead.'
|
||||
)
|
||||
|
||||
|
||||
def decode_json_header(header):
|
||||
data = base64.b64decode(header)
|
||||
if PY3:
|
||||
data = data.decode('utf-8')
|
||||
return json.loads(data)
|
||||
|
||||
|
||||
def compare_version(v1, v2):
|
||||
"""Compare docker versions
|
||||
|
||||
>>> v1 = '1.9'
|
||||
>>> v2 = '1.10'
|
||||
>>> compare_version(v1, v2)
|
||||
1
|
||||
>>> compare_version(v2, v1)
|
||||
-1
|
||||
>>> compare_version(v2, v2)
|
||||
0
|
||||
"""
|
||||
s1 = StrictVersion(v1)
|
||||
s2 = StrictVersion(v2)
|
||||
if s1 == s2:
|
||||
return 0
|
||||
elif s1 > s2:
|
||||
return -1
|
||||
else:
|
||||
return 1
|
||||
|
||||
|
||||
def version_lt(v1, v2):
|
||||
return compare_version(v1, v2) > 0
|
||||
|
||||
|
||||
def version_gte(v1, v2):
|
||||
return not version_lt(v1, v2)
|
||||
|
||||
|
||||
def _convert_port_binding(binding):
|
||||
result = {'HostIp': '', 'HostPort': ''}
|
||||
if isinstance(binding, tuple):
|
||||
if len(binding) == 2:
|
||||
result['HostPort'] = binding[1]
|
||||
result['HostIp'] = binding[0]
|
||||
elif isinstance(binding[0], string_types):
|
||||
result['HostIp'] = binding[0]
|
||||
else:
|
||||
result['HostPort'] = binding[0]
|
||||
elif isinstance(binding, dict):
|
||||
if 'HostPort' in binding:
|
||||
result['HostPort'] = binding['HostPort']
|
||||
if 'HostIp' in binding:
|
||||
result['HostIp'] = binding['HostIp']
|
||||
else:
|
||||
raise ValueError(binding)
|
||||
else:
|
||||
result['HostPort'] = binding
|
||||
|
||||
if result['HostPort'] is None:
|
||||
result['HostPort'] = ''
|
||||
else:
|
||||
result['HostPort'] = str(result['HostPort'])
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def convert_port_bindings(port_bindings):
|
||||
result = {}
|
||||
for k, v in iteritems(port_bindings):
|
||||
key = str(k)
|
||||
if '/' not in key:
|
||||
key += '/tcp'
|
||||
if isinstance(v, list):
|
||||
result[key] = [_convert_port_binding(binding) for binding in v]
|
||||
else:
|
||||
result[key] = [_convert_port_binding(v)]
|
||||
return result
|
||||
|
||||
|
||||
def convert_volume_binds(binds):
|
||||
if isinstance(binds, list):
|
||||
return binds
|
||||
|
||||
result = []
|
||||
for k, v in binds.items():
|
||||
if isinstance(k, binary_type):
|
||||
k = k.decode('utf-8')
|
||||
|
||||
if isinstance(v, dict):
|
||||
if 'ro' in v and 'mode' in v:
|
||||
raise ValueError(
|
||||
'Binding cannot contain both "ro" and "mode": {0}'
|
||||
.format(repr(v))
|
||||
)
|
||||
|
||||
bind = v['bind']
|
||||
if isinstance(bind, binary_type):
|
||||
bind = bind.decode('utf-8')
|
||||
|
||||
if 'ro' in v:
|
||||
mode = 'ro' if v['ro'] else 'rw'
|
||||
elif 'mode' in v:
|
||||
mode = v['mode']
|
||||
else:
|
||||
mode = 'rw'
|
||||
|
||||
# NOTE: this is only relevant for Linux hosts
|
||||
# (does not apply in Docker Desktop)
|
||||
propagation_modes = [
|
||||
'rshared',
|
||||
'shared',
|
||||
'rslave',
|
||||
'slave',
|
||||
'rprivate',
|
||||
'private',
|
||||
]
|
||||
if 'propagation' in v and v['propagation'] in propagation_modes:
|
||||
if mode:
|
||||
mode = ','.join([mode, v['propagation']])
|
||||
else:
|
||||
mode = v['propagation']
|
||||
|
||||
result.append(
|
||||
text_type('{0}:{1}:{2}').format(k, bind, mode)
|
||||
)
|
||||
else:
|
||||
if isinstance(v, binary_type):
|
||||
v = v.decode('utf-8')
|
||||
result.append(
|
||||
text_type('{0}:{1}:rw').format(k, v)
|
||||
)
|
||||
return result
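
The two accepted shapes side by side: a dict with `bind` plus `mode` (or the legacy `ro` flag), and a bare string target, which defaults to read-write. A small sketch with hypothetical paths:

```python
binds = {
    '/srv/app': {'bind': '/app', 'mode': 'ro'},
    '/tmp/cache': '/cache',  # plain string target defaults to rw
}
assert sorted(convert_volume_binds(binds)) == [
    '/srv/app:/app:ro',
    '/tmp/cache:/cache:rw',
]
```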
|
||||
|
||||
|
||||
def convert_tmpfs_mounts(tmpfs):
|
||||
if isinstance(tmpfs, dict):
|
||||
return tmpfs
|
||||
|
||||
if not isinstance(tmpfs, list):
|
||||
raise ValueError(
|
||||
'Expected tmpfs value to be either a list or a dict, found: {0}'
|
||||
.format(type(tmpfs).__name__)
|
||||
)
|
||||
|
||||
result = {}
|
||||
for mount in tmpfs:
|
||||
if isinstance(mount, string_types):
|
||||
if ":" in mount:
|
||||
name, options = mount.split(":", 1)
|
||||
else:
|
||||
name = mount
|
||||
options = ""
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
"Expected item in tmpfs list to be a string, found: {0}"
|
||||
.format(type(mount).__name__)
|
||||
)
|
||||
|
||||
result[name] = options
|
||||
return result
|
||||
|
||||
|
||||
def convert_service_networks(networks):
|
||||
if not networks:
|
||||
return networks
|
||||
if not isinstance(networks, list):
|
||||
raise TypeError('networks parameter must be a list.')
|
||||
|
||||
result = []
|
||||
for n in networks:
|
||||
if isinstance(n, string_types):
|
||||
n = {'Target': n}
|
||||
result.append(n)
|
||||
return result
|
||||
|
||||
|
||||
def parse_repository_tag(repo_name):
|
||||
parts = repo_name.rsplit('@', 1)
|
||||
if len(parts) == 2:
|
||||
return tuple(parts)
|
||||
parts = repo_name.rsplit(':', 1)
|
||||
if len(parts) == 2 and '/' not in parts[1]:
|
||||
return tuple(parts)
|
||||
return repo_name, None
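
The splitting rules in short: a digest (`@`) wins over a tag, and a colon only counts as a tag separator when nothing after it contains a `/`, which keeps registry ports intact:

```python
assert parse_repository_tag('ubuntu') == ('ubuntu', None)
assert parse_repository_tag('ubuntu:24.04') == ('ubuntu', '24.04')
# this colon belongs to the registry port, not to a tag:
assert parse_repository_tag('localhost:5000/ubuntu') == ('localhost:5000/ubuntu', None)
assert parse_repository_tag('ubuntu@sha256:1234') == ('ubuntu', 'sha256:1234')
```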
|
||||
|
||||
|
||||
def parse_host(addr, is_win32=False, tls=False):
|
||||
# Sensible defaults
|
||||
if not addr and is_win32:
|
||||
return DEFAULT_NPIPE
|
||||
if not addr or addr.strip() == 'unix://':
|
||||
return DEFAULT_UNIX_SOCKET
|
||||
|
||||
addr = addr.strip()
|
||||
|
||||
parsed_url = urlparse(addr)
|
||||
proto = parsed_url.scheme
|
||||
if not proto or any(x not in string.ascii_letters + '+' for x in proto):
|
||||
# https://bugs.python.org/issue754016
|
||||
parsed_url = urlparse('//' + addr, 'tcp')
|
||||
proto = 'tcp'
|
||||
|
||||
if proto == 'fd':
|
||||
raise errors.DockerException('fd protocol is not implemented')
|
||||
|
||||
# These protos are valid aliases for our library but not for the
|
||||
# official spec
|
||||
if proto == 'http' or proto == 'https':
|
||||
tls = proto == 'https'
|
||||
proto = 'tcp'
|
||||
elif proto == 'http+unix':
|
||||
proto = 'unix'
|
||||
|
||||
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
|
||||
raise errors.DockerException(
|
||||
"Invalid bind address protocol: {0}".format(addr)
|
||||
)
|
||||
|
||||
if proto == 'tcp' and not parsed_url.netloc:
|
||||
# "tcp://" is exceptionally disallowed by convention;
|
||||
# omitting a hostname for other protocols is fine
|
||||
raise errors.DockerException(
|
||||
'Invalid bind address format: {0}'.format(addr)
|
||||
)
|
||||
|
||||
if any([
|
||||
parsed_url.params, parsed_url.query, parsed_url.fragment,
|
||||
parsed_url.password
|
||||
]):
|
||||
raise errors.DockerException(
|
||||
'Invalid bind address format: {0}'.format(addr)
|
||||
)
|
||||
|
||||
if parsed_url.path and proto == 'ssh':
|
||||
raise errors.DockerException(
|
||||
'Invalid bind address format: no path allowed for this protocol:'
|
||||
' {0}'.format(addr)
|
||||
)
|
||||
else:
|
||||
path = parsed_url.path
|
||||
if proto == 'unix' and parsed_url.hostname is not None:
|
||||
# For legacy reasons, we consider unix://path
|
||||
# to be valid and equivalent to unix:///path
|
||||
path = '/'.join((parsed_url.hostname, path))
|
||||
|
||||
netloc = parsed_url.netloc
|
||||
if proto in ('tcp', 'ssh'):
|
||||
port = parsed_url.port or 0
|
||||
if port <= 0:
|
||||
port = 22 if proto == 'ssh' else (2376 if tls else 2375)
|
||||
netloc = '{0}:{1}'.format(parsed_url.netloc, port)
|
||||
|
||||
if not parsed_url.hostname:
|
||||
netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port)
|
||||
|
||||
# Rewrite schemes to fit library internals (requests adapters)
|
||||
if proto == 'tcp':
|
||||
proto = 'http{0}'.format('s' if tls else '')
|
||||
elif proto == 'unix':
|
||||
proto = 'http+unix'
|
||||
|
||||
if proto in ('http+unix', 'npipe'):
|
||||
return "{0}://{1}".format(proto, path).rstrip('/')
|
||||
return urlunparse(URLComponents(
|
||||
scheme=proto,
|
||||
netloc=netloc,
|
||||
url=path,
|
||||
params='',
|
||||
query='',
|
||||
fragment='',
|
||||
)).rstrip('/')
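
End to end, `parse_host` maps user-facing `DOCKER_HOST` strings onto the schemes the HTTP adapters expect, filling in the conventional default ports (2375 plain, 2376 with TLS, matching the corrected default above). A sketch:

```python
# tcp:// is rewritten to http(s):// for the requests adapters:
assert parse_host('tcp://1.2.3.4') == 'http://1.2.3.4:2375'
assert parse_host('tcp://1.2.3.4', tls=True) == 'https://1.2.3.4:2376'
# unix socket paths keep the http+unix scheme:
assert parse_host('unix:///var/run/docker.sock') == 'http+unix:///var/run/docker.sock'
```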
|
||||
|
||||
|
||||
def parse_devices(devices):
|
||||
device_list = []
|
||||
for device in devices:
|
||||
if isinstance(device, dict):
|
||||
device_list.append(device)
|
||||
continue
|
||||
if not isinstance(device, string_types):
|
||||
raise errors.DockerException(
|
||||
'Invalid device type {0}'.format(type(device))
|
||||
)
|
||||
device_mapping = device.split(':')
|
||||
if device_mapping:
|
||||
path_on_host = device_mapping[0]
|
||||
if len(device_mapping) > 1:
|
||||
path_in_container = device_mapping[1]
|
||||
else:
|
||||
path_in_container = path_on_host
|
||||
if len(device_mapping) > 2:
|
||||
permissions = device_mapping[2]
|
||||
else:
|
||||
permissions = 'rwm'
|
||||
device_list.append({
|
||||
'PathOnHost': path_on_host,
|
||||
'PathInContainer': path_in_container,
|
||||
'CgroupPermissions': permissions
|
||||
})
|
||||
return device_list
|
||||
|
||||
|
||||
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
|
||||
if not environment:
|
||||
environment = os.environ
|
||||
host = environment.get('DOCKER_HOST')
|
||||
|
||||
# empty string for cert path is the same as unset.
|
||||
cert_path = environment.get('DOCKER_CERT_PATH') or None
|
||||
|
||||
# empty string for tls verify counts as "false".
|
||||
# Any other value counts as true; unset counts as false.
|
||||
tls_verify = environment.get('DOCKER_TLS_VERIFY')
|
||||
if tls_verify == '':
|
||||
tls_verify = False
|
||||
else:
|
||||
tls_verify = tls_verify is not None
|
||||
enable_tls = cert_path or tls_verify
|
||||
|
||||
params = {}
|
||||
|
||||
if host:
|
||||
params['base_url'] = host
|
||||
|
||||
if not enable_tls:
|
||||
return params
|
||||
|
||||
if not cert_path:
|
||||
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
|
||||
|
||||
if not tls_verify and assert_hostname is None:
|
||||
# assert_hostname is a subset of TLS verification,
|
||||
# so if it is not set already then set it to false.
|
||||
assert_hostname = False
|
||||
|
||||
params['tls'] = TLSConfig(
|
||||
client_cert=(os.path.join(cert_path, 'cert.pem'),
|
||||
os.path.join(cert_path, 'key.pem')),
|
||||
ca_cert=os.path.join(cert_path, 'ca.pem'),
|
||||
verify=tls_verify,
|
||||
ssl_version=ssl_version,
|
||||
assert_hostname=assert_hostname,
|
||||
)
|
||||
|
||||
return params
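
A sketch of the environment translation, passing a throwaway mapping instead of mutating `os.environ` (the address is an example value); note the empty-string nuance documented above:

```python
# An empty DOCKER_TLS_VERIFY counts as false, so only base_url is set:
env = {'DOCKER_HOST': 'tcp://192.168.59.103:2375', 'DOCKER_TLS_VERIFY': ''}
assert kwargs_from_env(environment=env) == {'base_url': 'tcp://192.168.59.103:2375'}
# With DOCKER_TLS_VERIFY=1 (and cert files in place under
# DOCKER_CERT_PATH), the result would also carry a TLSConfig as 'tls'.
```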
|
||||
|
||||
|
||||
def convert_filters(filters):
|
||||
result = {}
|
||||
for k, v in iteritems(filters):
|
||||
if isinstance(v, bool):
|
||||
v = 'true' if v else 'false'
|
||||
if not isinstance(v, list):
|
||||
v = [v, ]
|
||||
result[k] = [
|
||||
str(item) if not isinstance(item, string_types) else item
|
||||
for item in v
|
||||
]
|
||||
return json.dumps(result)
|
||||
|
||||
|
||||
def parse_bytes(s):
|
||||
if isinstance(s, integer_types + (float,)):
|
||||
return s
|
||||
if len(s) == 0:
|
||||
return 0
|
||||
|
||||
if s[-2:-1].isalpha() and s[-1].isalpha():
|
||||
if s[-1] == "b" or s[-1] == "B":
|
||||
s = s[:-1]
|
||||
units = BYTE_UNITS
|
||||
suffix = s[-1].lower()
|
||||
|
||||
# Check if the variable is a string representation of an int
|
||||
# without a units part. Assuming that the units are bytes.
|
||||
if suffix.isdigit():
|
||||
digits_part = s
|
||||
suffix = 'b'
|
||||
else:
|
||||
digits_part = s[:-1]
|
||||
|
||||
if suffix in units.keys() or suffix.isdigit():
|
||||
try:
|
||||
digits = float(digits_part)
|
||||
except ValueError:
|
||||
raise errors.DockerException(
|
||||
'Failed converting the string value for memory ({0}) to'
|
||||
' an integer.'.format(digits_part)
|
||||
)
|
||||
|
||||
# Reconvert to long for the final result
|
||||
s = int(digits * units[suffix])
|
||||
else:
|
||||
raise errors.DockerException(
|
||||
'The specified value for memory ({0}) should specify the'
|
||||
' units. The postfix should be one of the `b` `k` `m` `g`'
|
||||
' characters'.format(s)
|
||||
)
|
||||
|
||||
return s
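
A few sample conversions, assuming the usual 1024-based `BYTE_UNITS` mapping: the suffixes are the single characters named in the error message, an optional trailing `b`/`B` is tolerated, and plain numbers pass through untouched:

```python
assert parse_bytes(1024) == 1024              # numbers pass through
assert parse_bytes('128') == 128              # bare digits are bytes
assert parse_bytes('512m') == 512 * 1048576
assert parse_bytes('512MB') == 512 * 1048576  # trailing 'B' is stripped
assert parse_bytes('1g') == 1073741824
```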
|
||||
|
||||
|
||||
def normalize_links(links):
|
||||
if isinstance(links, dict):
|
||||
links = iteritems(links)
|
||||
|
||||
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
|
||||
|
||||
|
||||
def parse_env_file(env_file):
|
||||
"""
|
||||
Reads a line-separated environment file.
|
||||
The format of each line should be "key=value".
|
||||
"""
|
||||
environment = {}
|
||||
|
||||
with open(env_file, 'r') as f:
|
||||
for line in f:
|
||||
|
||||
if line[0] == '#':
|
||||
continue
|
||||
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
parse_line = line.split('=', 1)
|
||||
if len(parse_line) == 2:
|
||||
k, v = parse_line
|
||||
environment[k] = v
|
||||
else:
|
||||
raise errors.DockerException(
|
||||
'Invalid line in environment file {0}:\n{1}'.format(
|
||||
env_file, line))
|
||||
|
||||
return environment
|
||||
|
||||
|
||||
def split_command(command):
|
||||
if PY2 and not isinstance(command, binary_type):
|
||||
command = command.encode('utf-8')
|
||||
return shlex.split(command)
|
||||
|
||||
|
||||
def format_environment(environment):
|
||||
def format_env(key, value):
|
||||
if value is None:
|
||||
return key
|
||||
if isinstance(value, binary_type):
|
||||
value = value.decode('utf-8')
|
||||
|
||||
return u'{key}={value}'.format(key=key, value=value)
|
||||
return [format_env(*var) for var in iteritems(environment)]
|
||||
|
||||
|
||||
def format_extra_hosts(extra_hosts, task=False):
|
||||
# Use format dictated by Swarm API if container is part of a task
|
||||
if task:
|
||||
return [
|
||||
'{0} {1}'.format(v, k) for k, v in sorted(iteritems(extra_hosts))
|
||||
]
|
||||
|
||||
return [
|
||||
'{0}:{1}'.format(k, v) for k, v in sorted(iteritems(extra_hosts))
|
||||
]
|
||||
|
||||
|
||||
def create_host_config(self, *args, **kwargs):
|
||||
raise errors.DeprecatedMethod(
|
||||
'utils.create_host_config has been removed. Please use a '
|
||||
'docker.types.HostConfig object instead.'
|
||||
)
|
@@ -0,0 +1,208 @@
|
||||
# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
"""
|
||||
Parse go logfmt messages.
|
||||
|
||||
See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format.
|
||||
"""
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
|
||||
# (look for "EBNFish")
|
||||
|
||||
|
||||
class InvalidLogFmt(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class _Mode(object):
|
||||
GARBAGE = 0
|
||||
KEY = 1
|
||||
EQUAL = 2
|
||||
IDENT_VALUE = 3
|
||||
QUOTED_VALUE = 4
|
||||
|
||||
|
||||
_ESCAPE_DICT = {
|
||||
'"': '"',
|
||||
'\\': '\\',
|
||||
"'": "'",
|
||||
'/': '/',
|
||||
'b': '\b',
|
||||
'f': '\f',
|
||||
'n': '\n',
|
||||
'r': '\r',
|
||||
't': '\t',
|
||||
}
|
||||
|
||||
_HEX_DICT = {
|
||||
'0': 0,
|
||||
'1': 1,
|
||||
'2': 2,
|
||||
'3': 3,
|
||||
'4': 4,
|
||||
'5': 5,
|
||||
'6': 6,
|
||||
'7': 7,
|
||||
'8': 8,
|
||||
'9': 9,
|
||||
'a': 0xA,
|
||||
'b': 0xB,
|
||||
'c': 0xC,
|
||||
'd': 0xD,
|
||||
'e': 0xE,
|
||||
'f': 0xF,
|
||||
'A': 0xA,
|
||||
'B': 0xB,
|
||||
'C': 0xC,
|
||||
'D': 0xD,
|
||||
'E': 0xE,
|
||||
'F': 0xF,
|
||||
}
|
||||
|
||||
|
||||
def _is_ident(cur):
|
||||
return cur > ' ' and cur not in ('"', '=')
|
||||
|
||||
|
||||
class _Parser(object):
|
||||
def __init__(self, line):
|
||||
self.line = line
|
||||
self.index = 0
|
||||
self.length = len(line)
|
||||
|
||||
def done(self):
|
||||
return self.index >= self.length
|
||||
|
||||
def cur(self):
|
||||
return self.line[self.index]
|
||||
|
||||
def next(self):
|
||||
self.index += 1
|
||||
|
||||
def prev(self):
|
||||
self.index -= 1
|
||||
|
||||
def parse_unicode_sequence(self):
|
||||
if self.index + 6 > self.length:
|
||||
raise InvalidLogFmt('Not enough space for unicode escape')
|
||||
if self.line[self.index:self.index + 2] != '\\u':
|
||||
raise InvalidLogFmt('Invalid unicode escape start')
|
||||
v = 0
|
||||
for i in range(self.index + 2, self.index + 6):
|
||||
v <<= 4
|
||||
try:
|
||||
v += _HEX_DICT[self.line[i]]
|
||||
except KeyError:
|
||||
raise InvalidLogFmt('Invalid unicode escape digit {digit!r}'.format(digit=self.line[i]))
|
||||
self.index += 6
|
||||
return chr(v)
|
||||
|
||||
|
||||
def parse_line(line, logrus_mode=False):
|
||||
result = {}
|
||||
parser = _Parser(line)
|
||||
key = []
|
||||
value = []
|
||||
mode = _Mode.GARBAGE
|
||||
|
||||
def handle_kv(has_no_value=False):
|
||||
k = ''.join(key)
|
||||
v = None if has_no_value else ''.join(value)
|
||||
result[k] = v
|
||||
del key[:]
|
||||
del value[:]
|
||||
|
||||
def parse_garbage(cur):
|
||||
if _is_ident(cur):
|
||||
return _Mode.KEY
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_key(cur):
|
||||
if _is_ident(cur):
|
||||
key.append(cur)
|
||||
parser.next()
|
||||
return _Mode.KEY
|
||||
elif cur == '=':
|
||||
parser.next()
|
||||
return _Mode.EQUAL
|
||||
else:
|
||||
if logrus_mode:
|
||||
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
|
||||
handle_kv(has_no_value=True)
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_equal(cur):
|
||||
if _is_ident(cur):
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.IDENT_VALUE
|
||||
elif cur == '"':
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
else:
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_ident_value(cur):
|
||||
if _is_ident(cur):
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.IDENT_VALUE
|
||||
else:
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_quoted_value(cur):
|
||||
if cur == '\\':
|
||||
parser.next()
|
||||
if parser.done():
|
||||
raise InvalidLogFmt('Unterminated escape sequence in quoted string')
|
||||
cur = parser.cur()
|
||||
if cur in _ESCAPE_DICT:
|
||||
value.append(_ESCAPE_DICT[cur])
|
||||
elif cur != 'u':
|
||||
raise InvalidLogFmt('Unknown escape sequence {seq!r}'.format(seq='\\' + cur))
|
||||
else:
|
||||
parser.prev()
|
||||
value.append(parser.parse_unicode_sequence())
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
elif cur == '"':
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
elif cur < ' ':
|
||||
raise InvalidLogFmt('Control characters in quoted string are not allowed')
|
||||
else:
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
|
||||
parsers = {
|
||||
_Mode.GARBAGE: parse_garbage,
|
||||
_Mode.KEY: parse_key,
|
||||
_Mode.EQUAL: parse_equal,
|
||||
_Mode.IDENT_VALUE: parse_ident_value,
|
||||
_Mode.QUOTED_VALUE: parse_quoted_value,
|
||||
}
|
||||
while not parser.done():
|
||||
mode = parsers[mode](parser.cur())
|
||||
if mode == _Mode.KEY and logrus_mode:
|
||||
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
|
||||
if mode == _Mode.KEY or mode == _Mode.EQUAL:
|
||||
handle_kv(has_no_value=True)
|
||||
elif mode == _Mode.IDENT_VALUE:
|
||||
handle_kv()
|
||||
elif mode == _Mode.QUOTED_VALUE:
|
||||
raise InvalidLogFmt('Unterminated quoted string')
|
||||
return result
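
A round trip through the parser shows the modes in action: quoted values may contain spaces and escapes, bare identifier values end at whitespace, and outside logrus mode a lone word becomes a key with no value:

```python
line = 'time="2024-01-01T00:00:00Z" level=info msg="shutting down" code=0'
assert parse_line(line) == {
    'time': '2024-01-01T00:00:00Z',
    'level': 'info',
    'msg': 'shutting down',
    'code': '0',
}
assert parse_line('readonly') == {'readonly': None}
```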
|