---
- name: Prompt for vars
tags:
- prompt
- never
hosts: localhost
connection: local
gather_facts: false
vars_prompt:
# only works at playbook level
- name: target_db
prompt: Target DB
private: false
default: localhost
- name: db_password
prompt: DB password
default: whatever
tasks: []
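# An illustrative use of the prompted values (sketch; any task consuming the vars would do):
# - name: Show the prompted values
#   ansible.builtin.debug:
#     msg: connecting to {{ target_db }} with a password of length {{ db_password | length }}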
- name: Run this play on up to 3 hosts at a time
hosts: large_group_of_hosts
serial: 3
tasks: []
- name: Run this play on a batch of 4 hosts, then a batch of 8, then the rest
hosts: large_group_of_hosts
serial:
- 4
- 8
- 100%
strategy: linear
tasks:
- name: Limit this task to 3 workers (or up to the current batch if lower)
throttle: 3
ansible.builtin.set_fact:
greetings: hi from {{ ansible_hostname }}
# - name: Run this task only on one single host
# run_once: true
# ansible.builtin.set_fact:
# confirm: only run on {{ ansible_hostname }}
- name: Reuse tasks
tags:
- never
hosts: localhost
connection: local
gather_facts: false
tasks: []
# - name: Statically use tasks from files
# when: false
# block:
# - name: By using absolute paths and special variables (preferred)
# ansible.builtin.import_tasks:
# file: "{{ role_path }}/tasks/install/{{ install_method }}.yml"
# - name: By using paths relative to the including file
# ansible.builtin.import_tasks:
# file: pre-flight.yml
# - name: Conditionally include tasks
# block:
# - name: by leveraging the 'with_fileglob' loop filter (preferred)
# ansible.builtin.include_tasks:
# file: "{{ item }}"
# with_fileglob: "{{ install_method }}.yml"
# - name: by checking the files' existence
# vars:
# filename: "{{ install_method }}.yml"
# when: lookup('ansible.builtin.fileglob', filename) != []
# ansible.builtin.import_tasks:
# file: "{{ filename }}"
- name: Reuse playbooks
# only works at playbook level
tags: never
vars:
var_for_playbook_1: value1
ansible.builtin.import_playbook: path/to/playbook.yml
# - name: Apply roles
# hosts: localhost
# connection: local
# gather_facts: false
# roles:
# - role_name
# - path/to/role
# - role: role_name
# - role: role_with_vars
# vars:
# var1_for_role_with_vars: value
# tasks:
# - name: Apply a role now
# ansible.builtin.import_role:
# name: role_name
- name: Integrate Ansible Vault
tags:
- vault
- never
hosts: localhost
connection: local
gather_subset:
- '!all'
- min
check_mode: true
tasks:
- name: Use encrypted values
ansible.builtin.set_fact:
var_from_encrypted_value:
# password: '1q2w3e4r', plaintext value: 'very secret string'
!vault |
$ANSIBLE_VAULT;1.1;AES256
34646464653830386631363430386432666530356364313532313336373665613038633464376335
3539363530613130623638313063363165386230646566640a313438386133366137383939336637
33333365393337326239336264623462373064383663363234353635316538356461353061646563
3037306464363439340a663430313739393439363936613862316361353330363638323065383063
39613935613035343637336537643266313737666635313730353034373736353736
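# generated with something like: ansible-vault encrypt_string --ask-vault-pass 'very secret string'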
- name: Use encrypted files
# The 'unvault' lookup plugin requires files to exist beforehand, but it is fine for them to be plaintext.
# The 'file' lookup plugin requires files to exist beforehand, but decrypts vault-encrypted files.
tags: tls_certificate
ansible.builtin.copy:
dest: /etc/haproxy/certificate.pem
content: |
{{ lookup('ansible.builtin.unvault', 'path/to/cert/key.pem') | string | trim }}
{{ lookup('ansible.builtin.unvault', 'path/to/cert/full_chain.pem') | string | trim }}
mode: '0700'
- name: Save data to encrypted files
# Mind that the 'vault' filter uses the 'filter_default' vault ID by default to encrypt content.
# Set that parameter to '' to *not* specify a vault ID.
vars:
ansible_vault_password: >-
{{ lookup('ansible.builtin.file', [playbook_dir, 'ansible_vault_password_file.txt'] | path_join) }}
ansible.builtin.copy:
dest: path/to/file
decrypt: false # necessary if the file does not exist beforehand
content: "{{ 'some string' | ansible.builtin.vault(ansible_vault_password, vault_id='') }}"
mode: '0644'
- name: Use YAML anchors
hosts: localhost
connection: local
vars:
some_reusable_task: &some_reusable_task
name: Some reusable task
tags: some_reusable_task
check_mode: false
ansible.builtin.set_fact:
some_fact: "{{ some_var | default('some value') }}"
some_reusable_tasks_block: &some_reusable_tasks_block
name: Some reusable tasks block
tags: some_reusable_tasks_block
block:
- name: Some first reusable task in block
tags: some_first_reusable_task_in_block
check_mode: false
ansible.builtin.set_fact:
some_first_fact: "{{ some_first_var | default('some first value') }}"
- name: Some nth reusable task in block
tags: some_nth_reusable_task_in_block
check_mode: false
ansible.builtin.set_fact:
some_nth_fact: "{{ some_nth_var | default('some nth value') }}"
tasks:
- *some_reusable_task
- <<: *some_reusable_task
vars:
some_var: some overridden value
- *some_reusable_tasks_block
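# A quick verification sketch of the merge-key override above:
# - name: DEBUG Show the fact set by the reused task
#   ansible.builtin.debug:
#     var: some_fact # 'some overridden value' once both reused tasks have run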
- name: Common operations
tags: never
hosts: localhost
connection: local
gather_subset:
- '!all'
- min
check_mode: true
tasks: # ordered alphabetically by name
- name: Add authorized keys
become: true
ansible.posix.authorized_key:
user: ansible
key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0123456789abcdefghijkl/ABCDEFGHIJKL01234567 ansible@example.org
- name: Add repositories
block:
- name: To DNF/YUM
when: ansible_pkg_mgr | lower in ['dnf', 'yum']
ansible.builtin.yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add users to the sudoers
become: true
community.general.sudoers:
name: ansible
user: ansible
nopassword: true
commands: ALL
- name: Create directories recursively
ansible.builtin.file:
path: /tmp/path/to/final/dir
state: directory
mode: '0775'
- name: Create users
become: true
ansible.builtin.user:
name: ansible
- name: Define files' content in tasks
ansible.builtin.copy:
dest: "{{ ansible_user_dir }}/.tmux.conf"
mode: u=rw,go=r
content: | # illustrative tmux settings; replace with the desired content
set -g mouse on
set -g history-limit 10000
- name: Generate random strings
ansible.builtin.set_fact:
random_alphanumerical_lowercase_string_of_12_chars: >-
{{ lookup('community.general.random_string', upper=false, special=false, length=12) }}
- name: Generate passwords
ansible.builtin.set_fact:
random_password: "{{ lookup('ansible.builtin.password', '/dev/null') }}"
random_password_with_requirements: >-
{{ lookup('ansible.builtin.password', '/dev/null length=32 chars=ascii_letters,digits,punctuation') }}
random_but_idempotent_password: >-
{{ lookup('ansible.builtin.password', '/dev/null', seed=inventory_hostname, length=16) }}
- name: Get the PID of the current play
ansible.builtin.set_fact:
current_play_pid: "{{ lookup('pipe', 'echo $PPID') }}"
- name: Look for files
ansible.builtin.set_fact:
path_list_of_all_txt_files_in_dir: "{{ query('ansible.builtin.fileglob', '/my/path/*.txt') }}"
- name: Install Docker Compose
block:
- name: Create Docker's CLI plugins directory
become: true
ansible.builtin.file:
dest: /usr/local/lib/docker/cli-plugins
state: directory
owner: root
group: root
mode: u=rwx,g=rx,o=rx
- name: Get Docker compose from its official binaries
become: true
ansible.builtin.get_url:
url: https://github.com/docker/compose/releases/latest/download/docker-compose-{{ ansible_system }}-{{ ansible_architecture }}
dest: /usr/local/lib/docker/cli-plugins/docker-compose
owner: root
group: root
mode: u=rwx,g=rx,o=rx
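# An optional verification sketch; the plugin should now answer to the Docker CLI:
# - name: Verify the Compose plugin is usable
#   ansible.builtin.command: docker compose version
#   changed_when: false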
- name: Install packages
block:
- name: Via package manager on any supported system
ansible.builtin.package:
name:
- tmux
- screen
- name: Via PIP
ansible.builtin.pip:
name:
- bottle
- django>1.11.0,<1.12.0
- svn+http://myrepo/svn/MyApp#egg=MyApp
- git+http://myrepo/app/MyApp
- file:///path/to/MyApp.tar.gz
- name: Replace multiple strings in files
ansible.builtin.replace:
path: /path/to/file.yml
regexp: '(\s+image:.*)stable-v.*'
replace: \g<1>custom
backup: true
- name: Gather facts about hosts previously not in inventory
delegate_to: host_previously_not_in_inventory
ansible.builtin.setup:
filter:
- '!all'
- min
- name: Apply SQL migrations from files in folder
# take files in alphabetical order so they can have a priority set
tags: apply_sql_migrations_from_files_in_folder
community.postgresql.postgresql_query:
login_host: "{{ login_host }}"
login_port: "{{ login_port }}"
login_user: "{{ login_user }}"
login_password: "{{ login_password }}"
login_db: "{{ login_db }}"
query: "{{ lookup('ansible.builtin.file', item) }}"
loop: >-
{{
query(
'fileglob',
[ role_path, 'files', '*.sql' ] | path_join,
[ role_path, 'files', country|lower, '*.sql' ] | path_join,
) | sort
}}
- name: Unarchive files
tags: unarchive
when:
- archive_file.stat.mimetype in ["application/x-tar"]
- archive_file.stat.path is regex('\\.tar')
block:
- name: Unarchive file '{{ archive_file.stat.path | basename }}'
ansible.builtin.unarchive:
remote_src: true
src: "{{ archive_file.stat.path }}"
dest: "{{ archive_file.stat.path | dirname }}"
- name: Get the name of extracted directories ending in '.dir'
tags: find
ansible.builtin.find:
paths: "{{ archive_file.stat.path | dirname }}"
recurse: false
file_type: directory
patterns: '*.dir'
register: extracted_dirs
- name: Save the first extracted directory found
ansible.builtin.stat:
path: "{{ extracted_dirs.files[0].path }}"
register: first_extracted_dir
- name: Run containers
block:
- name: Directly
community.docker.docker_container:
name: gitlab
image: gitlab/gitlab-ce:16.11.2-ce.0
hostname: gitlab.lan
published_ports:
- "8022:22"
- "8080:80"
- "8443:443"
env:
GITLAB_OMNIBUS_CONFIG: >-
external_url 'http://gitlab.lan';
shm_size: 256m
volumes:
- ./config:/etc/gitlab:Z
- ./logs:/var/log/gitlab:Z
- ./data:/var/opt/gitlab:Z
auto_remove: true
- name: With Compose
community.docker.docker_compose_v2:
project_src: /home/user/flask
- name: Send messages to Slack channels
vars:
slack_notification_hook_url: https://hooks.slack.com/services/AB01CD23EF4/ABCD0123E/aBcDefGh0123456789iJKLmn
block:
- name: Send plain messages
ansible.builtin.uri:
url: "{{ slack_notification_hook_url }}"
method: POST
body_format: json
body:
text: (╥╯ᗝ╰╥) task XYZ failed
- name: Send mrkdwn (Slack-specific markdown) text
# FIXME: still to be tested
ansible.builtin.uri:
url: "{{ slack_notification_hook_url }}"
method: POST
body_format: json
body:
blocks:
- type: section
text:
type: mrkdwn
text: This is a *_fancy_* message
- name: Setup cronjobs
block:
- name: At specific times
# Mind this is based on the *hosts'* time.
become: true
ansible.builtin.cron:
name: Prometheus manual data backup
cron_file: prometheus-manual-data-backup
hour: 4
minute: 0
user: root
job:
# - Keep '%' characters escaped or they'll be treated as newlines.
# - Archive creation returns 1 if it detects changes to read files.
# Using ';' instead of '&&' to ignore.
>
FILENAME="/tmp/prometheus-data-$(date +'\%s-\%F-\%H-\%M-\%S').tar.gz"
&& tar -czf "$FILENAME" '/var/lib/prometheus/data'
; tar -tf "$FILENAME" > '/dev/null'
&& aws s3 cp "$FILENAME" 's3://backups/prometheus/'
&& rm "$FILENAME"
- name: Use the users' home directory for something
block:
- name: By executing commands as the specified users
block:
- name: Get users' homedir back
become: true
become_user: "{{ item }}"
become_flags: -iH
check_mode: false
ansible.builtin.command: >-
echo "{{ item }}: $HOME"
changed_when: false
with_items:
- root
- ec2-user
register: users_homedir_retrieve
- name: Compute and register the results
tags: AnsibleUnsafeText_to_Dict
ansible.builtin.set_fact:
users_homedir: >-
{{
users_homedir_retrieve
| community.general.json_query('results[].stdout')
| map('from_yaml')
| combine
}}
- name: Do your thing!
become: true
become_user: "{{ item.key }}"
ansible.builtin.file:
path: "{{ item.value }}/placeholder"
state: touch
mode: '0755'
with_dict: "{{ users_homedir }}"
- name: From the system's entries
block:
- name: Get raw information from the system's entries
ansible.builtin.getent:
database: passwd
key: "{{ item }}"
split: ':'
with_items:
- root
- ec2-user
register: users_entries
- name: Compute and register the results
ansible.builtin.set_fact:
users_info: >-
{{
users_entries
| community.general.json_query('results[].ansible_facts.getent_passwd[]')
| combine
}}
- name: Do your thing!
ansible.builtin.file:
path: "{{ item.value[4] }}/placeholder"
owner: "{{ item.key }}"
state: touch
mode: '0755'
with_dict: "{{ users_info }}"
- name: Work with time
# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/to_datetime_filter.html
# https://docs.python.org/3/library/datetime.html
# The datetime format defaults to '%Y-%m-%d %H:%M:%S'.
tags: datetime
block:
- name: Add or remove time
ansible.builtin.set_fact:
one_year_from_now: "{{ '%Y-%m-%dT%H:%M:%S' | strftime( (ansible_date_time.epoch_int | int) + (60*60*24*365) ) }}"
one_year_from_now_date_only: "{{ '%Y-%m-%d' | strftime( now(fmt='%s')|int + (60*60*24*365) ) }}"
- name: Compare date and times
ansible.builtin.set_fact:
total_seconds_between_dates: >-
{{ ( ('2016-08-14 20:00:12' | to_datetime) - ('2015-12-25' | to_datetime('%Y-%m-%d')) ).total_seconds() }}
remaining_seconds_after_delta:
# Does NOT convert years and days to seconds.
# Use total_seconds() for that.
"{{ ( ('2016-08-14 20:00:12' | to_datetime) - ('2016-08-14 18:00:00' | to_datetime) ).seconds }}"
# This returns "7212", delta is 2 hours and 12 seconds
days_between_dates:
# Discards remaining hours, minutes, and seconds
"{{ ( ('2016-08-14 20:00:12' | to_datetime) - ('2015-12-25' | to_datetime('%Y-%m-%d')) ).days }}"
- name: Take the difference between dotnet (100ns precision) and iso8601 microsecond timestamps
vars:
date1: '2022-11-15T03:23:13.6869568Z'
date2: '2021-12-15 16:06:24.400087Z'
date1_short:
# Cap to microseconds for any timestamp with higher precision
'{{ date1 | regex_replace("([^.]+)(\.\d{6})\d*(.+)", "\1\2\3") }}'
iso8601format: '%Y-%m-%dT%H:%M:%S.%fZ'
rfc3339format: '%Y-%m-%d %H:%M:%S.%fZ'
ansible.builtin.set_fact:
date_diff: >-
{{ (date1_short|to_datetime(iso8601format) - date2|to_datetime(rfc3339format)).total_seconds() }}
- name: AWS-specific operations
tags:
- aws
- never
hosts: localhost
connection: local
gather_facts: false
check_mode: true
tasks:
- name: Apply roles on different targets than the current one
block: []
# - name: Gather facts about the target EC2 instance
# when: instance_information.instance_ids | length > 0
# delegate_to: "{{ instance_information.instance_ids | first }}"
# vars:
# ansible_connection: aws_ssm
# ansible_python_interpreter: /usr/bin/python3
# ansible_remote_tmp: /tmp/.ansible-ssm-user/tmp
# ansible.builtin.gather_facts: {}
# register: fact_gathering
# - name: Apply the role to the EC2 instance
# when: fact_gathering is not skipped
# delegate_to: "{{ instance_information.instance_ids | first }}"
# delegate_facts: true
# vars:
# ansible_connection: aws_ssm
# ansible_aws_ssm_timeout: 900
# ansible_python_interpreter: /usr/bin/python3
# ansible_remote_tmp: /tmp/.ansible-ssm-user/tmp
# ansible_async_dir: /tmp/.ansible-ssm-user/async
# some_role_var: some value
# some_other_role_var: some value
# ansible.builtin.import_role:
# name: role name
- name: Get the list of current public IP ranges
# too many to be put into security group rules
ansible.builtin.set_fact:
ip_ranges: >-
{{
lookup('url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False)
| from_json
| json_query('prefixes')
| selectattr('region', 'equalto', 'eu-west-1')
| selectattr('service', 'equalto', 'AMAZON')
| map(attribute='ip_prefix')
| list
}}
- name: Work with time
tags: datetime
# AWS' datetime format is '%Y-%m-%dT%H:%M:%S'.
# https://stackoverflow.com/questions/48101921/ansible-compare-difference-between-two-dates-for-the-last-hour
block: []
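# A minimal sketch, assuming 'some_instance' holds the output of 'amazon.aws.ec2_instance_info':
# - name: Compute the seconds elapsed since an instance was launched
#   ansible.builtin.set_fact:
#     seconds_since_launch: >-
#       {{
#         (
#           now(utc=true) -
#           (some_instance.instances[0].launch_time | to_datetime('%Y-%m-%dT%H:%M:%S+00:00'))
#         ).total_seconds()
#       }}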
- name: Check the caller can access AWS' APIs
tags: check_access_to_aws_api
amazon.aws.aws_caller_info:
- name: Check the caller can connect to EC2 instances via SSM
tags: check_access_to_ec2_instances_via_ssm
vars:
ssm_bucket: someBucketWhereSsmStoresData
block:
- name: Check the caller can get information about the S3 bucket used by SSM
tags: check_list_on_ssm_bucket
amazon.aws.s3_bucket_info:
name: "{{ ssm_bucket }}"
- name: "Check the caller can act upon objects in the S3 bucket used by SSM"
tags: check_usage_on_ssm_bucket
check_mode: false
amazon.aws.s3_object:
bucket: "{{ ssm_bucket }}"
object: whatever/test.txt
content: just a test file, nothing to see here
overwrite: latest
expiry: 15
mode: "{{ item }}"
loop:
- put
- getstr
- delobj
- name: Assume roles
tags: assume_role
block:
- name: Get session tokens
amazon.aws.sts_assume_role:
access_key: AKIA1EXAMPLE1EXAMPLE # optional if defined as environment variable
secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE # optional if defined as environment variable
profile: someProfile # optional if defined as environment variable
role_arn: "arn:aws:iam::123456789012:role/someRole"
role_session_name: someRoleSession
duration_seconds: "{{ (60 * 60 * 1) | int }}" # min 900s, must be int
register: assumed_role
- name: Use the assumed role to take action
amazon.aws.ec2_tag:
access_key: "{{ assumed_role.sts_creds.access_key }}"
secret_key: "{{ assumed_role.sts_creds.secret_key }}"
profile: null # required to use the assumed role's token, if profile is specified via environment variable
session_token: "{{ assumed_role.sts_creds.session_token }}"
resource: i-xyzxyz01
tags:
MyNewTag: value
- name: Test the remaining time for role assumption is > 5m
# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/to_datetime_filter.html
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
ansible.builtin.debug:
msg: >-
{{
dict([
[
'assumed_role.sts_creds.expiration',
assumed_role.sts_creds.expiration | to_datetime('%Y-%m-%dT%H:%M:%S+00:00')
],
[ 'now()', now() ],
[
'assumed_role.sts_creds.expiration - now()',
assumed_role.sts_creds.expiration | to_datetime('%Y-%m-%dT%H:%M:%S+00:00') - now()
],
[
'condition',
(
assumed_role.sts_creds.expiration
| to_datetime('%Y-%m-%dT%H:%M:%S+00:00') - now()
).total_seconds() > 300
],
])
}}
- name: Update the play's inventory with a newly started instance
tags:
- update_inventory
- new_instance
when: new_instance.instance_id is defined
block:
- name: Add the new instance to the play's inventory
tags: add_instance_to_inventory
ansible.builtin.add_host:
name: "{{ new_instance.instance_id }}"
ansible_python_interpreter: /usr/bin/python3
ansible_connection: community.aws.aws_ssm
ansible_aws_ssm_bucket_name: company-ssm-logs
ansible_aws_ssm_region: eu-west-1
ansible_remote_tmp: /home/ssm-user/.cache/ansible/tmp
ansible_async_dir: /home/ssm-user/.cache/ansible/async
- name: Gather facts from the instance
tags: gather_facts
delegate_to: "{{ new_instance.instance_id }}"
delegate_facts: true
ansible.builtin.gather_facts: # alternatively, use 'ansible.builtin.setup' to allow for subsets gathering
- name: DEBUG Print the new instance's host variables
delegate_to: "{{ new_instance.instance_id }}"
ansible.builtin.debug:
verbosity: 3
var: hostvars[new_instance.instance_id]
- name: Wait for AWS to realize some requests have been made
ansible.builtin.pause:
seconds: 60
- name: EC2-specific operations
block:
- name: Get running instances with 'K8S' as the 'Application' tag
amazon.aws.ec2_instance_info:
filters:
"tag:Application": K8S
instance-state-name: ["running"]
- name: Clone EC2 instances
vars:
source_instance_id: i-0123456789abcdef0
block:
- name: Get instance information from the original instance
amazon.aws.ec2_instance_info:
instance_ids:
- "{{ source_instance_id }}"
register: source_instance_info
- name: Create an AMI of the original instance
amazon.aws.ec2_ami:
instance_id: "{{ source_instance_id }}"
no_reboot: true # remove if the instance rebooting upon AMI creation is no biggie
wait: true
wait_timeout: 3600 # big volumes call for long wait times (a 200GiB volume took )
name: ami-source
register: source_ami
- name: Use the AMI to launch clones identical to the original
when: source_ami.image_id is defined
amazon.aws.ec2_instance:
name: clone
vpc_subnet_id: "{{ source_instance_info.instances[0].subnet_id }}"
instance_type: "{{ source_instance_info.instances[0].instance_type }}"
image:
id: "{{ source_ami.image_id }}"
- name: Long-running tasks via SSM
vars:
ansible_connection: community.aws.aws_ssm
ansible_remote_tmp: /tmp/.ansible-ssm-user/tmp
ansible_async_dir: /tmp/.ansible-ssm-user/async
block:
- name: Dump a DB from an RDS instance to a temporary file
when:
- ansible_check_mode is falsy # check mode and async cannot be used on same task
- rds_instance.endpoint is defined
vars:
wanted_pattern_in_module_output: >-
{{ '"failed": 0, "started": 1, "finished": 0' | regex_escape() }}
community.postgresql.postgresql_db:
login_host: "{{ rds_instance.endpoint.address }}"
login_port: "{{ rds_instance.endpoint.port }}"
login_user: "{{ rds_instance.master_username }}"
login_password: "{{ db_password }}"
name: sales
state: dump
target: >-
{{
[
ansible_user_dir,
'dump.' ~ db_instance_identifier ~ '.' ~ ansible_date_time.iso8601_basic_short ~ '.dir'
] | path_join
}}
target_opts: >-
--exclude-table …
--exclude-schema archived
--no-publications
--format d --jobs $(nproc)
async: "{{ 60 * 60 * 2 }}" # wait up to 2 hours -- 60s * 60m * 2h
poll: 0 # fire and forget; ssm would not allow self-checking anyways
register: dump
changed_when:
- dump.rc == 0
- dump.module_stderr == ''
- "'started' | extract(dump.module_stdout | regex_search('{.*}') | from_json) == 1"
- "'failed' | extract(dump.module_stdout | regex_search('{.*}') | from_json) == 0"
failed_when: dump.rc != 0
- name: Check on the dump task
vars:
max_wait: "{{ (60 * 60 * 12) }}" # wait for the async task to end
ansible_aws_ssm_timeout: "{{ max_wait }}" # ssm uses a single connection, keep active until the end
dump_stdout_as_obj: "{{ dump.module_stdout | regex_search('{.*}') | from_json }}"
ansible_job_id: "{{ dump_stdout_as_obj.ansible_job_id }}"
ansible.builtin.async_status:
jid: "{{ ansible_job_id }}"
register: dump_result
until: dump_result.finished
retries: "{{ ((max_wait | int) / 300) | round(0, 'ceil') | int }}" # total wait ~ max_wait with a 300s delay
delay: 300 # check once every 5m to avoid overloading the ssm agent
- name: RDS-specific operations
block:
- name: Create an instance's snapshot
block:
- name: Create the snapshot
amazon.aws.rds_instance_snapshot:
db_instance_identifier: identifier-for-db-instance
db_snapshot_identifier: identifier-for-db-snapshot
register: snapshot_creation
- name: Wait for the snapshot to be in the 'available' state
when: snapshot_creation.snapshot_create_time is defined
amazon.aws.rds_snapshot_info:
db_snapshot_identifier: "{{ snapshot_creation.db_snapshot_identifier }}"
register: snapshot_check
retries: 3
delay: 120
until: snapshot_check.snapshots | selectattr('status', 'equalto', 'available') | list | length > 0
- name: Restore instance with automatic backup enabled to point in time
block:
- name: Restore DB instance
amazon.aws.rds_instance:
db_instance_identifier: restored-to-pitr
creation_source: instance
source_db_instance_identifier: source-instance
use_latest_restorable_time: true
tags: "{{ omit }}" # avoid setting tags, it errors out when restoring to pitr
wait:
# avoid waiting for db instances with automatic backup enabled to finish backing up the restored
# instance right after creation - db instances' first backup can take unbearably long (3h for 100GB)
false
register: pitr_restored_instance
- name: Wait for the restored DB instance to be ready
when: pitr_restored_instance.db_instance_identifier is defined
block:
- name: Wait for the restored DB instance to be ready
amazon.aws.rds_instance_info:
db_instance_identifier: "{{ pitr_restored_instance.db_instance_identifier }}"
register: pitr_restored_instance_ready_check
retries: 15
delay: 60
until:
- pitr_restored_instance_ready_check.instances[0].db_instance_status in ['available', 'backing-up']
- pitr_restored_instance_ready_check.instances[0].pending_modified_values.keys() | length == 0
- name: Update restored DB instance information
# 'amazon.aws.rds_instance' will *not* have the 'endpoint' key defined if not waiting
ansible.builtin.set_fact:
pitr_restored_instance: "{{ pitr_restored_instance_ready_check.instances[0] }}"
- name: Dump roles' privileges
block:
- name: Dump to file
environment:
PGPASSWORD: someRandomString
vars:
out_file: /tmp/instance-id_roles.sql
ansible.builtin.command: >-
pg_dumpall
--host 'instance-id.0123456789ab.eu-west-1.rds.amazonaws.com' --port '5432'
--user 'postgres' --database 'postgres' --no-password
--roles-only --no-role-passwords
--file '{{ out_file }}'
changed_when: false
- name: Dump to variable for later use through 'dump_execution.stdout_lines'
environment:
PGPASSWORD: someRandomString
ansible.builtin.command: >-
pg_dumpall
-h 'instance-id.0123456789ab.eu-west-1.rds.amazonaws.com' -p '5432'
-U 'postgres' -l 'postgres' -w
-r --no-role-passwords
changed_when: false
register: dump_execution
- name: Wait for pending changes to be applied
amazon.aws.rds_instance_info:
db_instance_identifier: identifier-for-db-instance
register: instance_check
retries: 12
delay: 15
until: instance_check.instances[0].pending_modified_values.keys() | length == 0
- name: S3-specific operations
block:
- name: Check S3 object exists
amazon.aws.s3_object_info:
bucket_name: my-bucket
object_name: prefix/object.tar
- name: Download objects from S3
# The 'amazon.aws.s3_object' module might be *not* suitable for files bigger than the executor's currently
# available memory. See <https://github.com/ansible-collections/amazon.aws/issues/2395>.
# TL:DR: at the time of writing, the module keeps downloaded data in memory before flushing it to disk,
# filling up the host's memory when downloading big files and causing it to stall or crash.
amazon.aws.s3_object:
bucket: my-bucket
object: prefix/object.tar
dest: /tmp/object.tar
mode: get
- name: Upload objects to S3
amazon.aws.s3_object:
bucket: my-bucket
object: prefix/object.tar
src: /tmp/object.tar
mode: put
- name: "AWS: Start stopped instances and add the first of them to the inventory for the next play"
# works at playbook level
tags:
- never
hosts: localhost
connection: local
check_mode: false
handlers:
- name: Add the first started host
ansible.builtin.add_host:
groups:
- tag_Application_Postgres
- tag_Component_Dumper
name: "{{ started_instances.instance_ids[0] }}"
- name: Add all started hosts with host variables
loop: "{{ started_instances.instance_ids }}"
ansible.builtin.add_host:
groups:
- tag_Application_Postgres
- tag_Component_Dumper
name: "{{ item }}"
ansible_connection: aws_ssm
ansible_aws_ssm_bucket_name: company-ssm-logs
ansible_aws_ssm_region: eu-west-1
ansible_aws_ssm_timeout: 900
ansible_remote_tmp: /tmp/.ansible-ssm-user/tmp
ansible_async_dir: /tmp/.ansible-ssm-user/async
tasks:
- name: Start the PG dumper instance
tags: dumper
amazon.aws.ec2_instance:
filters:
tag:Application: Postgres
tag:Component: Dumper
state: started
register: started_instances
notify:
- Add the first started host
- Add all started hosts with host variables
# follow up with play using hosts 'tag_Application_Postgres,&tag_Component_Dumper'
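# e.g. a skeleton of such a follow-up play:
# - name: Dump data from the started instance
#   hosts: tag_Application_Postgres,&tag_Component_Dumper
#   gather_facts: false
#   tasks: []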
- name: AWX-specific operations
tags: never
hosts: localhost
connection: local
gather_facts: false
check_mode: true
environment:
CONTROLLER_HOST: https://awx.example.org/
CONTROLLER_VERIFY_SSL: 'false' # environment values are expected to be strings
CONTROLLER_USERNAME: admin
CONTROLLER_PASSWORD: somethingSecret
tasks:
- name: Export all data from existing instances
# At the time of writing: applications, credential_types, credentials, execution_environments, inventory,
# inventory_sources, job_templates, notification_templates, organizations, projects, schedules, teams, and users.
awx.awx.export:
all: true
register: awx_export_output
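# The counterpart sketch, assuming 'awx.awx.import' runs against the target instance's environment
# and that the export's result exposes an 'assets' key:
# - name: Import the exported data into another instance
#   awx.awx.import:
#     assets: "{{ awx_export_output.assets }}"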
- name: DBLab-specific operations
tags: never
hosts: localhost
connection: local
vars:
dblab_api_port: 2346 # some operations (reset) are not available on the non-api endpoints
max_snapshot_age_in_days: 15
vars_prompt:
- name: dblab_host
private: false
- name: dblab_token
pre_tasks:
- name: Print out run's variables
ansible.builtin.debug:
verbosity: 3
msg: >-
{{
dict([
[ 'host', dblab_host ],
[ 'port', dblab_api_port ],
[ 'token', dblab_token ],
[ 'max_snapshot_age_in_days', max_snapshot_age_in_days ],
])
}}
- name: Check the snapshots
block:
- name: Gather snapshots' status
check_mode: false
ansible.builtin.uri:
url: https://{{ dblab_host }}:{{ dblab_api_port }}/api/snapshots
headers:
'verification-token': "{{ dblab_token }}"
register: snapshots_status
- name: Print out the request's results
ansible.builtin.debug:
verbosity: 3
var: snapshots_status.json
- name: Check the data is recent enough on all snapshots
ansible.builtin.assert:
that: >-
(
ansible_date_time.iso8601|to_datetime('%Y-%m-%dT%H:%M:%SZ') -
item.dataStateAt|to_datetime('%Y-%m-%dT%H:%M:%SZ')
).days < max_snapshot_age_in_days
fail_msg: snapshot {{ item.id }} for pool {{ item.pool }} is too old
loop: "{{ snapshots_status.json }}"
tasks:
- name: Reset clones on {{ dblab_host }}
tags: reset_clones
block:
- name: Gather clones on {{ dblab_host }}
check_mode: false
ansible.builtin.uri:
url: http://{{ dblab_host }}:{{ dblab_api_port }}/api/status
headers:
'verification-token': "{{ dblab_token }}"
register: instance_status
- name: Print out the request's results
ansible.builtin.debug:
verbosity: 3
var: instance_status.json
- name: Reset protected clones
vars:
clones: "{{ instance_status.json.cloning.clones | selectattr('protected', 'equalto', true) }}"
ansible.builtin.uri:
url: http://{{ dblab_host }}:{{ dblab_api_port }}/api/clone/{{ item.id }}/reset
method: POST
headers:
'verification-token': "{{ dblab_token }}"
body_format: json
body:
latest: true
loop: "{{ clones }}"
register: reset_requests
- name: Gitea-specific operations
tags:
- gitea
- never
hosts: localhost
connection: local
gather_facts: false
check_mode: true
tasks:
- name: Create a GitLab mirror repository in Gitea
tags: create_gitea_mirror_gitlab
ansible.builtin.uri:
url: https://gitea.example.org/api/v1/repos/migrate
method: POST
headers:
Authorization: token abcdef0123456789abcdef0123456789abcdef01
body_format: json
body:
auth_username: ro-access
auth_token: glpat-projAccesToken012345
clone_addr: https://gitlab.example.org/jimmy/secret-project
issues: false
labels: false
lfs: false
milestones: false
mirror: true
mirror_interval: 8h0m0s
private: true
pull_requests: false
releases: false
repo_name: secret-project
repo_owner: jimmy
service: gitlab
wiki: false
status_code: 201
- name: GitLab-specific operations
tags:
- gitlab
- never
hosts: localhost
connection: local
gather_facts: false
check_mode: true
tasks:
- name: Install configured fleeting plugins
when: runner_executor in ["docker-autoscaler", "instance"]
become: true
ansible.builtin.command:
chdir: /root
cmd: gitlab-runner fleeting install
creates: /root/.config/fleeting/plugins
- name: Create a Project Access Token for Gitea in GitLab
tags: create_gitlab_project_access_token
block:
- name: Create the access token
community.general.gitlab_project_access_token:
api_url: https://gitlab.example.org
api_token: glpat-m-PLACEHOLDER0123456
project: jimmy/secret-project
name: ro-access
expires_at: "{{ '%Y-%m-%d' | strftime( now(fmt='%s')|int + (60*60*24*365) ) }}" # 1y from now
access_level: guest
scopes:
- read_api
- read_repository
register: gitlab_project_access_token_creation
- name: Save the access token for later use
ansible.builtin.set_fact:
gitlab_project_access_token: "{{ gitlab_project_access_token_creation.access_token.token }}"
- name: Let's Encrypt-specific operations
tags: never
hosts: localhost
connection: local
gather_facts: false
check_mode: true
tasks:
# The 'acme_certificate' module takes in file paths for the certificate's files; those need either to *not* exist
# beforehand, or their content to be in specific formats.
- name: Revoke test certificates with account key
community.crypto.acme_certificate_revoke:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: path/to/acme_account.key.pem
certificate: path/to/certificate.crt.pem
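# A minimal request sketch for completeness; the 'path/to/…' files are placeholders, the CSR must
# exist beforehand, and the challenge still needs validating as per the module's documentation.
# - name: Request a test certificate
#   community.crypto.acme_certificate:
#     acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
#     acme_version: 2
#     terms_agreed: true
#     account_key_src: path/to/acme_account.key.pem
#     csr: path/to/certificate.csr
#     dest: path/to/certificate.crt.pem
#     challenge: http-01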