56 Commits

Author SHA1 Message Date
Tuan-Dat Tran
0a3171b9bc feat(k3s): Added 2 nodes (2/2)
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2026-01-26 23:08:34 +01:00
Tuan-Dat Tran
3068a5a8fb feat(k3s): Added 2 nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2026-01-26 22:42:19 +01:00
Tuan-Dat Tran
ef652fac20 refactor: yml -> yaml
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-11-07 20:44:14 +01:00
Tuan-Dat Tran
22c1b534ab feat(k3s): Add new node and machine
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-26 10:41:11 +01:00
Tuan-Dat Tran
9cb90a8020 feat(caddy): netcup->cf
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-25 09:25:40 +02:00
Tuan-Dat Tran
d9181515bb feat(k3s): Added (temporary) node
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-19 01:33:42 +02:00
Tuan-Dat Tran
c3905ed144 feat(git): Add .gitattributes for ansible-vault git diff
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-19 00:34:51 +02:00
Tuan-Dat Tran
5fb50ab4b2 feat(k3s): Add new node
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-07 23:46:40 +02:00
Tuan-Dat Tran
2909d6e16c feat(nfs): Removed unused/removed nfs servers
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
0aed818be5 feat(docker): Removed nodes docker-host10 and docker-host12
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
fbdeec93ce feat(docker): match services that moved to k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
44626101de feat(docker): match services that moved to k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
c1d6f13275 refactor(ansible-lint): fixed ansible-lint warnings
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
282e98e90a fix(proxmox): commented 'non-errors' on script
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
9573cbfcad feat(k3s): Added 2 nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-07 21:21:33 +02:00
Tuan-Dat Tran
48aec11d8c feat(common): added iscsi for longhorn on k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-07 18:17:33 +02:00
Tuan-Dat Tran
a1da69ac98 feat(proxmox): check_vm as cronjob
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-02 19:52:49 +02:00
Tuan-Dat Tran
7aa16f3207 Added blog.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 22:59:01 +02:00
Tuan-Dat Tran
fe3f1749c5 Update README.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 22:51:15 +02:00
Tuan-Dat Tran
6eef96b302 feat(pre-commit): Added linting
2025-07-27 22:46:23 +02:00
Tuan-Dat Tran
2882abfc0b Added README.md for roles
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:40:46 +02:00
Tuan-Dat Tran
2b759cc2ab Update README.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:16:35 +02:00
Tuan-Dat Tran
dbaebaee80 cleanup: services moved to argocd
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 13:58:25 +02:00
Tuan-Dat Tran
89c51aa45c feat(argo): app-of-app argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-25 07:58:41 +02:00
Tuan-Dat Tran
0139850ee3 feat(reverse_proxy): fix caddy letsencrypt
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 21:26:11 +02:00
Tuan-Dat Tran
976cad51e2 refactor(k3s): enhance cluster setup and enable ArgoCD apps
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 07:23:23 +02:00
Tuan-Dat Tran
e1a2248154 feat(kubernetes): add nfs-provisioner
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-15 23:24:52 +02:00
Tuan-Dat Tran
d8fd094379 feat(kubernetes): stable kubernetes with argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-14 22:57:13 +02:00
Tuan-Dat Tran
76000f8123 feat(kubernetes): add initial setup for ArgoCD, Cert-Manager, MetalLB, and Traefik
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 14:25:53 +02:00
Tuan-Dat Tran
4aa939426b refactor(k3s): enhance kubeconfig generation and token management
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 09:33:39 +02:00
Tuan-Dat Tran
9cce71f73b refactor(k3s): manage token securely and install guest agent
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 02:15:01 +02:00
Tuan-Dat Tran
97a5d6c41d refactor(k3s): centralize k3s primary server IP and integrate Netcup DNS
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 01:30:05 +02:00
Tuan-Dat Tran
f1b0cfad2c refactor(k3s): streamline inventory and primary server IP handling
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 00:40:48 +02:00
Tuan-Dat Tran
dac0d88d60 feat(proxmox): add k3s agents and refine VM provisioning
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 23:08:44 +02:00
Tuan-Dat Tran
609e000089 refactor(ansible): centralize inventory and variables in 'vars' directory
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 21:38:53 +02:00
Tuan-Dat Tran
3d7f652ff3 refactor(ansible): restructure inventory and remove postgres role
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 20:35:26 +02:00
Tuan-Dat Tran
cb8ccd8f00 wip
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-06-07 01:19:27 +02:00
Tuan-Dat Tran
02168225b1 wip
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-06-07 00:16:54 +02:00
Tuan-Dat Tran
6ff1ccecd0 refactor(infra): reorganize docker host VMs and service assignments
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-07 00:02:30 +02:00
Tuan-Dat Tran
de62327fde Add naruto01 to proxmox nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-06 13:33:46 +02:00
Tuan-Dat Tran
b70c8408dc 2025-05-03T21:41+02:00
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-03 21:41:32 +02:00
Tuan-Dat Tran
a913e1cbc0 refactor: reorganize proxmox roles, add hardware acceleration, and update common config tasks
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-03 10:24:50 +02:00
Tuan-Dat Tran
e3c67a32e9 feat(reverse_proxy): add Netcup DNS ACME challenge support and refactor Caddy setup
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-28 23:24:29 +02:00
Tuan-Dat Tran
8f2998abc0 refactor(ansible): use ansible_user_id and add root package condition
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-27 18:15:07 +02:00
Tuan-Dat Tran
7fcee3912f refactor(ansible): refactor common role application and improve vm ssh config
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-27 17:46:41 +02:00
Tuan-Dat Tran
591342f580 feat(proxmox): refactor vm provisioning and add pci passthrough config
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 23:34:42 +02:00
Tuan-Dat Tran
f2ea03bc01 feat(proxmox): automatic vm creation
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 21:58:58 +02:00
Tuan-Dat Tran
0e8e07ed3e feat(docker): Added healthcheck
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 13:21:02 +02:00
Tuan-Dat Tran
a2a58f6343 feat(keycloak|docker): improved templating
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-25 23:37:24 +02:00
Tuan-Dat Tran
42196a32dc feat(docker): Add karakeep and keycloak services
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-24 20:24:33 +02:00
Tuan-Dat Tran
6934a9f5fc distributed secrets to group_vars and added karakeep
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-06 23:46:28 +02:00
Tuan-Dat Tran
27621aac03 Added proxmox-vm and static tagging of docker images
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-06 18:04:33 +02:00
Tuan-Dat Tran
56f058c254 moved ssh to cert based
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-03-25 01:09:08 +01:00
Tuan-Dat Tran
924e4a2f92 refactor(inventory): Reorganized inventory
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-02-07 01:54:34 +01:00
Tuan-Dat Tran
060e2425ff fix(skeleton): Fixed script and content for secrets.skeleton
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-02-07 00:09:37 +01:00
Tuan-Dat Tran
f2d489f63a refactor(structure/ansible.cfg): Changed folder structure with ansible.cfg
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-02-07 00:06:37 +01:00
204 changed files with 3901 additions and 1734 deletions

.ansible-lint Normal file

@@ -0,0 +1,33 @@
---
# .ansible-lint
# Specify exclude paths to prevent linting vendor roles, etc.
exclude_paths:
- ./.git/
- ./.venv/
- ./galaxy_roles/
# A list of rules to skip during linting.
skip_list:
skip_list:
- experimental
- fqcn-builtins
- no-handler
- var-naming
- no-changed-when
- risky-shell-pipe
# Enforce certain rules that are not enabled by default.
enable_list:
- no-free-form
- var-spacing
- no-log-password
- no-relative-path
- command-instead-of-module
- fqcn[deep]
- no-changed-when
# Offline mode disables any features that require internet access.
offline: false
# Set the desired verbosity level.
verbosity: 1
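A quick way to exercise this config (a minimal sketch, assuming `ansible-lint` is installed and run from the repository root so the `.ansible-lint` file above is discovered automatically; the `playbooks/` and `roles/` paths are taken from the README below):

```sh
# Lint playbooks and roles; the exclude_paths, skip_list and enable_list
# settings above are picked up from .ansible-lint in the working directory.
ansible-lint playbooks/ roles/
```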

.editorconfig Normal file

@@ -0,0 +1,17 @@
root = true
[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.{yml,yaml}]
indent_size = 2
[*.py]
indent_size = 4
[*.md]
trim_trailing_whitespace = false

.gitattributes vendored Normal file

@@ -0,0 +1,8 @@
vars/group_vars/proxmox/secrets_vm.yml diff=ansible-vault merge=binary
vars/group_vars/all/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/docker/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/k3s/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/k3s/secrets_token.yml diff=ansible-vault merge=binary
vars/group_vars/kubernetes/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/proxmox/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/proxmox/secrets_vm.yml diff=ansible-vault merge=binary
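These attributes only take effect once git is told how to render `diff=ansible-vault` files; the README's Vault Git Diff note (quoted in the README diff below) pairs them with this one-time setup:

```sh
# Render vaulted files through `ansible-vault view` when diffing, so
# `git diff` shows decrypted YAML instead of vault ciphertext.
git config --global diff.ansible-vault.textconv "ansible-vault view"
```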

.gitignore vendored

@@ -1,2 +0,0 @@
/secrets.yml
*.ovpn

.pre-commit-config.yaml Normal file

@@ -0,0 +1,23 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: local
hooks:
- id: ansible-galaxy-install
name: Install ansible-galaxy collections
entry: ansible-galaxy collection install -r requirements.yaml
language: system
pass_filenames: false
always_run: true
- repo: https://github.com/ansible/ansible-lint
rev: v6.22.2
hooks:
- id: ansible-lint
files: \.(yaml)$
additional_dependencies:
- ansible-core==2.15.8
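For context, activating this configuration locally is standard `pre-commit` usage (nothing repo-specific beyond the file above):

```sh
# Install the git hook once, then run every configured hook on the full tree.
pre-commit install
pre-commit run --all-files
```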

README.md

@@ -2,73 +2,81 @@
-**I do not recommend this project being used for ones own infrastructure, as
-this project is heavily attuned to my specific host/network setup**
-The Ansible Project to provision fresh Debian VMs for my Proxmox instances.
-Some values are hard coded such as the public key both in
-[./scripts/debian_seed.sh](./scripts/debian_seed.sh) and [./group_vars/all/vars.yml](./group_vars/all/vars.yml).
-## Prerequisites
-- [secrets.yml](secrets.yml) in the root directory of this repository.
-  Skeleton file can be found as [./secrets.yml.skeleton](./secrets.yml.skeleton).
-- IP Configuration of hosts like in [./host_vars/\*](./host_vars/*)
-- Setup [~/.ssh/config](~/.ssh/config) for the respective hosts used.
-- Install `passlib` for your operating system. Needed to hash passwords ad-hoc.
-## Improvable Variables
-- `group_vars/k3s/vars.yml`:
-  - `k3s.server.ips`: Take list of IPs from host_vars `k3s_server*.yml`.
-  - `k3s_db_connection_string`: Embed this variable in the `k3s.db.`-directory.
-    Currently causes loop.
-## Run Playbook
-To run a first playbook and test the setup the following command can be executed.
-```sh
-ansible-playbook -i production -J k3s-servers.yml
-```
-This will run the [./k3s-servers.yml](./k3s-servers.yml) playbook and execute
-its roles.
-## After successful k3s installation
-To access our Kubernetes cluster from our host machine to work on it via
-flux and such we need to manually copy a k3s config from one of our server nodes to our host machine.
-Then we need to install `kubectl` on our host machine and optionally `kubectx` if we're already
-managing other Kubernetes instances.
-Then we replace the localhost address inside of the config with the IP of our load balancer.
-Finally we'll need to set the KUBECONFIG variable.
-```sh
-mkdir ~/.kube/
-scp k3s-server00:/etc/rancher/k3s/k3s.yaml ~/.kube/config
-chown $USER ~/.kube/config
-sed -i "s/127.0.0.1/192.168.20.22/" ~/.kube/config
-export KUBECONFIG=~/.kube/config
-```
-Install flux and continue in the flux repository.
-## Longhorn Nodes
-To create longhorn nodes from existing kubernetes nodes we want to increase
-their storage capacity. Since we're using VMs for our k3s nodes we can
-resize the root-disk of the VMs in the proxmox GUI.
-Then we have to resize the partitions inside of the VM so the root partition
-uses the newly available space.
-When we have LVM-based root partition we can do the following:
-```sh
-# Create a new partition from the free space.
-sudo fdisk /dev/sda
-# At the fdisk prompt: n (new partition), accept the defaults, then w (write).
-# Create a LVM volume on the new partition
-sudo pvcreate /dev/sda3
-sudo vgextend k3s-vg /dev/sda3
-# Use the newly available storage in the root volume
-sudo lvresize -l +100%FREE -r /dev/k3s-vg/root
-```
+This Ansible project automates the setup of a K3s Kubernetes cluster on Proxmox VE. It also includes playbooks for configuring Docker hosts, load balancers, and other services.
+## Repository Structure
+The repository is organized into the following main directories:
+- `playbooks/`: Contains the main Ansible playbooks for different setup scenarios.
+- `roles/`: Contains the Ansible roles that are used by the playbooks.
+- `vars/`: Contains variable files, including group-specific variables.
+## Playbooks
+The following playbooks are available:
+- `proxmox.yml`: Provisions VMs and containers on Proxmox VE.
+- `k3s-servers.yml`: Sets up the K3s master nodes.
+- `k3s-agents.yml`: Sets up the K3s agent nodes.
+- `k3s-loadbalancer.yml`: Configures a load balancer for the K3s cluster.
+- `k3s-storage.yml`: Configures storage for the K3s cluster.
+- `docker.yml`: Sets up Docker hosts and their load balancer.
+- `docker-host.yml`: Configures the docker hosts.
+- `docker-lb.yml`: Configures a load balancer for Docker services.
+- `kubernetes_setup.yml`: A meta-playbook for setting up the entire Kubernetes cluster.
+## Roles
+The following roles are defined:
+- `common`: Common configuration tasks for all nodes.
+- `proxmox`: Manages Proxmox VE, including VM and container creation.
+- `k3s_server`: Installs and configures K3s master nodes.
+- `k3s_agent`: Installs and configures K3s agent nodes.
+- `k3s_loadbalancer`: Configures an Nginx-based load balancer for the K3s cluster.
+- `k3s_storage`: Configures storage solutions for Kubernetes.
+- `docker_host`: Installs and configures Docker.
+- `kubernetes_argocd`: Deploys Argo CD to the Kubernetes cluster.
+- `node_exporter`: Installs the Prometheus Node Exporter for monitoring.
+- `reverse_proxy`: Configures a Caddy-based reverse proxy.
+## Usage
+1. **Install dependencies:**
+   ```bash
+   pip install -r requirements.txt
+   ansible-galaxy install -r requirements.yml
+   ```
+2. **Configure variables:**
+   - Create an inventory file (e.g., `vars/k3s.ini`).
+   - Adjust variables in `vars/group_vars/` to match your environment.
+3. **Run playbooks:**
+   ```bash
+   # To provision VMs on Proxmox
+   ansible-playbook -i vars/proxmox.ini playbooks/proxmox.yml
+   # To set up the K3s cluster
+   ansible-playbook -i vars/k3s.ini playbooks/kubernetes_setup.yml
+   ```
+## Notes
+### Vault Git Diff
+This repo has a `.gitattributes` which points at the repo's ansible-vault files.
+These can be temporarily decrypted for git diff by adding this in conjunction with the `.gitattributes`:
+```sh
+# https://stackoverflow.com/questions/29937195/how-to-diff-ansible-vault-changes
+git config --global diff.ansible-vault.textconv "ansible-vault view"
+```
+## Disclaimer
+This project is highly customized for the author's specific environment. Using it without modification is not recommended.
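A simple way to sanity-check the Vault Git Diff setup from the Notes section above (the vaulted path is just one example taken from the `.gitattributes` shown earlier; any vaulted file in the repo works):

```sh
# With the textconv config in place, changes to vaulted files should render
# as decrypted YAML rather than opaque ciphertext.
git diff HEAD~1 -- vars/group_vars/all/secrets.yml
```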

ansible.cfg Normal file

@@ -0,0 +1,41 @@
[defaults]
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
interpreter_python=python3
# (pathspec) Colon separated paths in which Ansible will search for Roles.
roles_path=./roles
# (pathlist) Comma separated list of Ansible inventory sources
inventory=./vars/
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
# If executable, it will be run and the resulting stdout will be used as the password.
vault_password_file=/media/veracrypt1/scripts/ansible_vault.sh
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
# This affects vars_files, include_vars, inventory and vars plugins among others.
yaml_valid_extensions=.yaml
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host
host_key_checking=False
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
;retry_files_enabled=False
# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
# This file will be overwritten after each run with the list of failed hosts from all plays.
;retry_files_save_path=
# (list) Allows to change the group variable precedence merge order.
;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play
[colors]
# (string) Defines the color to use when showing 'Skipped' task status
skip=dark gray
[tags]
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=
[inventory]
ignore_extensions={{(REJECT_EXTS + ('.orig', '.cfg', '.retry', '.bak'))}}
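As the comment above notes, `vault_password_file` may point at an executable whose stdout becomes the vault password. A minimal sketch of what `/media/veracrypt1/scripts/ansible_vault.sh` could look like (the password location below is a made-up example, not taken from this repo):

```sh
#!/bin/sh
# Hypothetical vault password helper: Ansible executes this script and uses
# whatever it prints to stdout as the vault password.
cat /media/veracrypt1/secrets/vault_password.txt
```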

ansible.cfg.default Normal file

@@ -0,0 +1,690 @@
[defaults]
# (boolean) By default Ansible will issue a warning when received from a task action (module or action plugin)
# These warnings can be silenced by adjusting this setting to False.
;action_warnings=True
# (list) Accept list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates.
;cowsay_enabled_stencils=bud-frogs, bunny, cheese, daemon, default, dragon, elephant-in-snake, elephant, eyes, hellokitty, kitty, luke-koala, meow, milk, moofasa, moose, ren, sheep, small, stegosaurus, stimpy, supermilker, three-eyes, turkey, turtle, tux, udder, vader-koala, vader, www
# (string) Specify a custom cowsay path or swap in your cowsay implementation of choice
;cowpath=
# (string) This allows you to chose a specific cowsay stencil for the banners or use 'random' to cycle through them.
;cow_selection=default
# (boolean) This option forces color mode even when running without a TTY or the "nocolor" setting is True.
;force_color=False
# (path) The default root path for Ansible config files on the controller.
;home=~/.ansible
# (boolean) This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
;nocolor=False
# (boolean) If you have cowsay installed but want to avoid the 'cows' (why????), use this.
;nocows=False
# (boolean) Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
;any_errors_fatal=False
# (path) The password file to use for the become plugin. --become-password-file.
# If executable, it will be run and the resulting stdout will be used as the password.
;become_password_file=
# (pathspec) Colon separated paths in which Ansible will search for Become Plugins.
;become_plugins={{ ANSIBLE_HOME ~ "/plugins/become:/usr/share/ansible/plugins/become" }}
# (string) Chooses which cache plugin to use, the default 'memory' is ephemeral.
;fact_caching=memory
# (string) Defines connection or path information for the cache plugin
;fact_caching_connection=
# (string) Prefix to use for cache plugin files/tables
;fact_caching_prefix=ansible_facts
# (integer) Expiration timeout for the cache plugin data
;fact_caching_timeout=86400
# (list) List of enabled callbacks, not all callbacks need enabling, but many of those shipped with Ansible do as we don't want them activated by default.
;callbacks_enabled=
# (string) When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`).
;collections_on_ansible_version_mismatch=warning
# (pathspec) Colon separated paths in which Ansible will search for collections content. Collections must be in nested *subdirectories*, not directly in these directories. For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``, and you want to add ``my.collection`` to that directory, it must be saved as ``'{{ ANSIBLE_HOME} ~ "/collections/ansible_collections/my/collection" }}'``.
;collections_path={{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}
# (boolean) A boolean to enable or disable scanning the sys.path for installed collections
;collections_scan_sys_path=True
# (path) The password file to use for the connection plugin. --connection-password-file.
;connection_password_file=
# (pathspec) Colon separated paths in which Ansible will search for Action Plugins.
;action_plugins={{ ANSIBLE_HOME ~ "/plugins/action:/usr/share/ansible/plugins/action" }}
# (boolean) When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo) to return data that is not marked 'unsafe'.
# By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backward compatibility, however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run through the templating engine late
;allow_unsafe_lookups=False
# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting.
;ask_pass=False
# (boolean) This controls whether an Ansible playbook should prompt for a vault password.
;ask_vault_pass=False
# (pathspec) Colon separated paths in which Ansible will search for Cache Plugins.
;cache_plugins={{ ANSIBLE_HOME ~ "/plugins/cache:/usr/share/ansible/plugins/cache" }}
# (pathspec) Colon separated paths in which Ansible will search for Callback Plugins.
;callback_plugins={{ ANSIBLE_HOME ~ "/plugins/callback:/usr/share/ansible/plugins/callback" }}
# (pathspec) Colon separated paths in which Ansible will search for Cliconf Plugins.
;cliconf_plugins={{ ANSIBLE_HOME ~ "/plugins/cliconf:/usr/share/ansible/plugins/cliconf" }}
# (pathspec) Colon separated paths in which Ansible will search for Connection Plugins.
;connection_plugins={{ ANSIBLE_HOME ~ "/plugins/connection:/usr/share/ansible/plugins/connection" }}
# (boolean) Toggles debug output in Ansible. This is *very* verbose and can hinder multiprocessing. Debug output can also include secret information despite no_log settings being enabled, which means debug mode should not be used in production.
;debug=False
# (string) This indicates the command to use to spawn a shell under for Ansible's execution needs on a target. Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is.
;executable=/bin/sh
# (string) This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering.
# If not set, it will fallback to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``.
# This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module.
# The real action being created by the implicit task is currently ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules, by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults.
;fact_path=
# (pathspec) Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
;filter_plugins={{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}
# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host.
# When false, the handlers will not run if a failure has occurred on a host.
# This can also be set per play or on the command line. See Handlers and Failure for more details.
;force_handlers=False
# (integer) Maximum number of forks Ansible will use to execute tasks on target hosts.
;forks=5
# (string) This setting controls the default policy of fact gathering (facts discovered about remote systems).
# This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin.
;gathering=implicit
# (list) Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering. See the module documentation for specifics.
# It does **not** apply to user defined ``ansible.builtin.setup`` tasks.
;gather_subset=
# (integer) Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics.
# It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks.
;gather_timeout=
# (string) This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
# This does not affect variables whose values are scalars (integers, strings) or arrays.
# **WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable, leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it.
# We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much complexity has been introduced into the data structures and plays.
# For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars`` that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope, but the setting itself affects all sources and makes debugging even harder.
# All playbooks and roles in the official examples repos assume the default for this setting.
# Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables. For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
# The Ansible project recommends you **avoid ``merge`` for new projects.**
# It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it. New projects should **avoid 'merge'**.
;hash_behaviour=replace
# (pathlist) Comma separated list of Ansible inventory sources
;inventory=/etc/ansible/hosts
# (pathspec) Colon separated paths in which Ansible will search for HttpApi Plugins.
;httpapi_plugins={{ ANSIBLE_HOME ~ "/plugins/httpapi:/usr/share/ansible/plugins/httpapi" }}
# (float) This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. Higher values are more suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage might be a concern.
# The default corresponds to the value hardcoded in Ansible <= 2.1
;internal_poll_interval=0.001
# (pathspec) Colon separated paths in which Ansible will search for Inventory Plugins.
;inventory_plugins={{ ANSIBLE_HOME ~ "/plugins/inventory:/usr/share/ansible/plugins/inventory" }}
# (string) This is a developer-specific feature that allows enabling additional Jinja2 extensions.
# See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)
;jinja2_extensions=[]
# (boolean) This option preserves variable types during template operations.
;jinja2_native=False
# (boolean) Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
# If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
;keep_remote_files=False
# (boolean) Controls whether callback plugins are loaded when running /usr/bin/ansible. This may be used to log activity from the command line, send notifications, and so on. Callback plugins are always loaded for ``ansible-playbook``.
;bin_ansible_callbacks=False
# (tmppath) Temporary directory for Ansible to use on the controller.
;local_tmp={{ ANSIBLE_HOME ~ "/tmp" }}
# (list) List of logger names to filter out of the log file
;log_filter=
# (path) File to which Ansible will log on the controller. When empty logging is disabled.
;log_path=
# (pathspec) Colon separated paths in which Ansible will search for Lookup Plugins.
;lookup_plugins={{ ANSIBLE_HOME ~ "/plugins/lookup:/usr/share/ansible/plugins/lookup" }}
# (string) Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant for those two modules.
;ansible_managed=Ansible managed
# (string) This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
;module_args=
# (string) Compression scheme to use when transferring Python modules to the target.
;module_compression=ZIP_DEFLATED
# (string) Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``.
;module_name=command
# (pathspec) Colon separated paths in which Ansible will search for Modules.
;library={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }}
# (pathspec) Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
;module_utils={{ ANSIBLE_HOME ~ "/plugins/module_utils:/usr/share/ansible/plugins/module_utils" }}
# (pathspec) Colon separated paths in which Ansible will search for Netconf Plugins.
;netconf_plugins={{ ANSIBLE_HOME ~ "/plugins/netconf:/usr/share/ansible/plugins/netconf" }}
# (boolean) Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures.
;no_log=False
# (boolean) Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable a newer style PowerShell modules from writing to the event log.
;no_target_syslog=False
# (raw) What templating should return as a 'null' value. When not set it will let Jinja2 decide.
;null_representation=
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how often to check back on the status of those tasks when an explicit poll interval is not supplied. The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and providing a quick turnaround when something may have completed.
;poll_interval=15
# (path) Option for connections using a certificate or key file to authenticate, rather than an agent or passwords, you can set the default value here to avoid re-specifying --private-key with every invocation.
;private_key_file=
# (boolean) By default, imported roles publish their variables to the play and other roles, this setting can avoid that.
# This was introduced as a way to reset role variables to default values if a role is used more than once in a playbook.
# Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
;private_role_vars=False
# (integer) Port to use in remote connections, when blank it will use the connection plugin default.
;remote_port=
# (string) Sets the login user for the target machines
# When blank it uses the connection plugin's default, normally the user currently executing Ansible.
;remote_user=
# (pathspec) Colon separated paths in which Ansible will search for Roles.
;roles_path={{ ANSIBLE_HOME ~ "/roles:/usr/share/ansible/roles:/etc/ansible/roles" }}
# (string) Set the main callback used to display Ansible output. You can only have one at a time.
# You can have many other callbacks, but just one can be in charge of stdout.
# See :ref:`callback_plugins` for a list of available options.
;stdout_callback=default
# (string) Set the default strategy used for plays.
;strategy=linear
# (pathspec) Colon separated paths in which Ansible will search for Strategy Plugins.
;strategy_plugins={{ ANSIBLE_HOME ~ "/plugins/strategy:/usr/share/ansible/plugins/strategy" }}
# (boolean) Toggle the use of "su" for tasks.
;su=False
# (string) Syslog facility to use when Ansible logs to the remote target
;syslog_facility=LOG_USER
# (pathspec) Colon separated paths in which Ansible will search for Terminal Plugins.
;terminal_plugins={{ ANSIBLE_HOME ~ "/plugins/terminal:/usr/share/ansible/plugins/terminal" }}
# (pathspec) Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
;test_plugins={{ ANSIBLE_HOME ~ "/plugins/test:/usr/share/ansible/plugins/test" }}
# (integer) This is the default timeout for connection plugins to use.
;timeout=10
# (string) Can be any connection plugin available to your ansible installation.
# There is also a (DEPRECATED) special 'smart' option, that will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions.
;transport=ssh
# (boolean) When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
# Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written.
;error_on_undefined_vars=True
# (pathspec) Colon separated paths in which Ansible will search for Vars Plugins.
;vars_plugins={{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}
# (string) The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.
;vault_encrypt_identity=
# (string) The label to use for the default vault id label in cases where a vault id label is not provided
;vault_identity=default
# (list) A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.
;vault_identity_list=
# (string) If true, decrypting vaults with a vault id will only try the password from the matching vault-id
;vault_id_match=False
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
# If executable, it will be run and the resulting stdout will be used as the password.
;vault_password_file=
# (integer) Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
;verbosity=0
# (boolean) Toggle to control the showing of deprecation warnings
;deprecation_warnings=True
# (boolean) Toggle to control showing warnings related to running devel
;devel_warning=True
# (boolean) Normally ``ansible-playbook`` will print a header for each task that is run. These headers will contain the name: field from the task if you specified one. If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running. Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action. If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header.
# This setting defaults to False because there is a chance that you have sensitive values in your parameters and you do not want those to be printed.
# If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values See How do I keep secret data in my playbook? for more information.
;display_args_to_stdout=False
# (boolean) Toggle to control displaying skipped task/host entries in a task in the default callback
;display_skipped_hosts=True
# (string) Root docsite URL used to generate docs URLs in warning/error text; must be an absolute URL with valid scheme and trailing slash.
;docsite_root_url=https://docs.ansible.com/ansible-core/
# (pathspec) Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
;doc_fragment_plugins={{ ANSIBLE_HOME ~ "/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments" }}
# (string) By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
# These warnings can be silenced by adjusting this setting to False.
;duplicate_dict_key=warn
# (boolean) Whether or not to enable the task debugger, this previously was done as a strategy plugin.
# Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
# a task is failed on unreachable. Use the debugger keyword for more flexibility.
;enable_task_debugger=False
# (boolean) Toggle to allow missing handlers to become a warning instead of an error when notifying.
;error_on_missing_handler=True
# (list) Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type.
# If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figure it out).
# This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit).
;facts_modules=smart
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host
;host_key_checking=True
# (boolean) Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
# Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
;inject_facts_as_vars=True
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
;interpreter_python=auto
# (boolean) If 'false', invalid attributes for a task will result in warnings instead of errors
;invalid_task_attribute_failed=True
# (boolean) Toggle to control showing warnings related to running a Jinja version older than required for jinja2_native
;jinja2_native_warning=True
# (boolean) By default Ansible will issue a warning when there are no hosts in the inventory.
# These warnings can be silenced by adjusting this setting to False.
;localhost_warning=True
# (int) Maximum size of files to be considered for diff display
;max_diff_size=104448
# (list) List of extensions to ignore when looking for modules to load
# This is for rejecting script and binary module fallback extensions
;module_ignore_exts={{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}
# (bool) Enables whether module responses are evaluated for containing non UTF-8 data
# Disabling this may result in unexpected behavior
# Only ansible-core should evaluate this configuration
;module_strict_utf8_response=True
# (list) TODO: write it
;network_group_modules=eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos
# (boolean) Previously Ansible would only clear some of the plugin loading caches when loading new roles, this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows to return to that behaviour.
;old_plugin_cache_clear=False
# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
;playbook_dir=
# (string) This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
;playbook_vars_root=top
# (path) A path to configuration for filtering which plugins installed on the system are allowed to be used.
# See :ref:`plugin_filtering_config` for details of the filter file's format.
# The default is /etc/ansible/plugin_filters.yml
;plugin_filters_cfg=
# (string) Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default value of 0 does not attempt to adjust existing system-defined limits.
;python_module_rlimit_nofile=0
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
;retry_files_enabled=False
# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
# This file will be overwritten after each run with the list of failed hosts from all plays.
;retry_files_save_path=
# (str) This setting can be used to optimize vars_plugin usage depending on user's inventory size and play selection.
;run_vars_plugins=demand
# (bool) This adds the custom stats set via the set_stats plugin to the default output
;show_custom_stats=False
# (string) Action to take when a module parameter value is converted to a string (this does not affect variables). For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc. will be converted by the YAML parser unless fully quoted.
# Valid options are 'error', 'warn', and 'ignore'.
# Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
;string_conversion_action=warn
# (boolean) Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
# These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
;system_warnings=True
# (boolean) This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True is specified.
# True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
;task_debugger_ignore_errors=True
# (integer) Set the maximum time (in seconds) that a task can run for.
# If set to 0 (the default) there is no timeout.
;task_timeout=0
# (string) Make ansible transform invalid characters in group names supplied by inventory sources.
;force_valid_group_names=never
# (boolean) Toggles the use of persistence for connections.
;use_persistent_connections=False
# (bool) A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group. Metadata containing unexpected fields or value types will produce a warning when this is True.
;validate_action_group_metadata=True
# (list) Accept list for variable plugins that require it.
;vars_plugins_enabled=host_group_vars
# (list) Allows to change the group variable precedence merge order.
;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play
# (string) The salt to use for the vault encryption. If it is not provided, a random salt will be used.
;vault_encrypt_salt=
# (bool) Force 'verbose' option to use stderr instead of stdout
;verbose_to_stderr=False
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
# This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the overall maximum duration the task can take will be extended by the amount specified here.
;win_async_startup_timeout=5
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
# This affects vars_files, include_vars, inventory and vars plugins among others.
;yaml_valid_extensions=.yml, .yaml, .json
[privilege_escalation]
# (boolean) Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method
;agnostic_become_prompt=True
# (boolean) This setting controls if become is skipped when remote user and become user are the same. I.E root sudo to root.
# If executable, it will be run and the resulting stdout will be used as the password.
;become_allow_same_user=False
# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login.
;become=False
# (boolean) Toggle to prompt for privilege escalation password.
;become_ask_pass=False
# (string) executable to use for privilege escalation, otherwise Ansible will depend on PATH
;become_exe=
# (string) Flags to pass to the privilege escalation executable.
;become_flags=
# (string) Privilege escalation method to use when `become` is enabled.
;become_method=sudo
# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
;become_user=root
[persistent_connection]
# (path) Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
# If null, ansible will start with the same directory as the ansible script.
;ansible_connection_path=
# (int) This controls the amount of time to wait for response from remote device before timing out persistent connection.
;command_timeout=30
# (integer) This controls the retry timeout for persistent connection to connect to the local domain socket.
;connect_retry_timeout=15
# (integer) This controls how long the persistent connection will remain idle before it is destroyed.
;connect_timeout=30
# (path) Path to socket to be used by the connection persistence system.
;control_path_dir={{ ANSIBLE_HOME ~ "/pc" }}
[connection]
# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer.
# It can result in a very significant performance improvement when enabled.
# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default.
# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
;pipelining=False
[colors]
# (string) Defines the color to use on 'Changed' task status
;changed=yellow
# (string) Defines the default color to use for ansible-console
;console_prompt=white
# (string) Defines the color to use when emitting debug messages
;debug=dark gray
# (string) Defines the color to use when emitting deprecation messages
;deprecate=purple
# (string) Defines the color to use when showing added lines in diffs
;diff_add=green
# (string) Defines the color to use when showing diffs
;diff_lines=cyan
# (string) Defines the color to use when showing removed lines in diffs
;diff_remove=red
# (string) Defines the color to use when emitting error messages
;error=red
# (string) Defines the color to use for highlighting
;highlight=white
# (string) Defines the color to use when showing 'OK' task status
;ok=green
# (string) Defines the color to use when showing 'Skipped' task status
;skip=cyan
# (string) Defines the color to use on 'Unreachable' status
;unreachable=bright red
# (string) Defines the color to use when emitting verbose messages. i.e those that show with '-v's.
;verbose=blue
# (string) Defines the color to use when emitting warning messages
;warn=bright purple
[selinux]
# (boolean) This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh. This is necessary when running on systems which do not have SELinux.
;libvirt_lxc_noseclabel=False
# (list) Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors.
# Data corruption may occur and writes are not always verified when a filesystem is in the list.
;special_context_filesystems=fuse, nfs, vboxsf, ramfs, 9p, vfat
[diff]
# (bool) Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
;always=False
# (integer) How many lines of context to show when displaying the differences between files.
;context=3
[galaxy]
# (path) The directory that stores cached responses from a Galaxy server.
# This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
# Cache files inside this dir will be ignored if they are world writable.
;cache_dir={{ ANSIBLE_HOME ~ "/galaxy_cache" }}
# (bool) whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`
;collections_path_warning=True
# (path) Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
;collection_skeleton=
# (list) patterns of files to ignore inside a Galaxy collection skeleton directory
;collection_skeleton_ignore=^.git$, ^.*/.git_keep$
# (bool) Disable GPG signature verification during collection installation.
;disable_gpg_verify=False
# (bool) Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when outputting the stdout to a file.
# This config option controls whether the display wheel is shown or not.
# The default is to show the display wheel if stdout has a tty.
;display_progress=
# (path) Configure the keyring used for GPG signature verification during collection installation and verification.
;gpg_keyring=
# (boolean) If set to yes, ansible-galaxy will not validate TLS certificates. This can be useful for testing against a server with a self-signed certificate.
;ignore_certs=
# (list) A list of GPG status codes to ignore during GPG signature verification. See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions.
# If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`, signature verification will fail even if all error codes are ignored.
;ignore_signature_status_codes=
# (str) The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
# This should be a positive integer or all to indicate all signatures must successfully validate the collection.
# Prepend + to the value to fail if no valid signatures are found for the collection.
;required_valid_signature_count=1
# (path) Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
;role_skeleton=
# (list) patterns of files to ignore inside a Galaxy role or collection skeleton directory
;role_skeleton_ignore=^.git$, ^.*/.git_keep$
# (string) URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source.
;server=https://galaxy.ansible.com
# (list) A list of Galaxy servers to use when installing a collection.
# The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
# See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.
# The order of servers in this list is used to as the order in which a collection is resolved.
# Setting this config option will ignore the :ref:`galaxy_server` config option.
;server_list=
# (int) The default timeout for Galaxy API calls. Galaxy servers that don't configure a specific timeout will fall back to this value.
;server_timeout=60
# (path) Local path to galaxy access token file
;token_path={{ ANSIBLE_HOME ~ "/galaxy_token" }}
[inventory]
# (string) This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it
;host_pattern_mismatch=warning
# (boolean) If 'true', it is a fatal error when any given inventory source cannot be successfully parsed by any available inventory plugin; otherwise, this situation only attracts a warning.
;any_unparsed_is_failed=False
# (bool) Toggle to turn on inventory caching.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
# This message will be removed in 2.16.
;cache=False
# (string) The plugin for caching inventory.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_plugin=
# (string) The inventory cache connection.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_connection=
# (string) The table prefix for the cache plugin.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_prefix=ansible_inventory_
# (string) Expiration timeout for the inventory cache plugin data.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_timeout=3600
# (list) List of enabled inventory plugins; it also determines the order in which they are used.
;enable_plugins=host_list, script, auto, yaml, ini, toml
# (bool) Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting.
;export=False
# (list) List of extensions to ignore when using a directory as an inventory source
;ignore_extensions={{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}
# (list) List of patterns to ignore when using a directory as an inventory source
;ignore_patterns=
# (bool) If 'true' it is a fatal error if every single potential inventory source fails to parse, otherwise this situation will only attract a warning.
;unparsed_is_failed=False
# (boolean) By default Ansible will issue a warning when no inventory was loaded and notes that it will use an implicit localhost-only inventory.
# These warnings can be silenced by adjusting this setting to False.
;inventory_unparsed_warning=True
[netconf_connection]
# (string) This variable is used to enable bastion/jump host with netconf connection. If set to True, the bastion/jump host SSH settings should be present in the ~/.ssh/config file; alternatively, it can be set to a custom SSH configuration file path from which to read the bastion/jump host settings.
;ssh_config=
[paramiko_connection]
# (boolean) TODO: write it
;host_key_auto_add=False
# (boolean) TODO: write it
;look_for_keys=True
[jinja2]
# (list) This list of filters avoids 'type conversion' when templating variables
# Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
;dont_type_filters=string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json
[tags]
# (list) Default list of tags to run in your plays; Skip Tags has precedence.
;run=
# (list) Default list of tags to skip in your plays; has precedence over Run Tags.
;skip=

blog.md

@@ -0,0 +1,69 @@
---
title: "Automating My Homelab: From Bare Metal to Kubernetes with Ansible"
date: 2025-07-27
author: "TuDatTr"
tags: ["Ansible", "Proxmox", "Kubernetes", "K3s", "IaC", "Homelab"]
---
## The Homelab: Repeatable, Automated, and Documented
For many tech enthusiasts, a homelab is a playground for learning, experimenting, and self-hosting services. But as the complexity grows, so does the management overhead. Manually setting up virtual machines, configuring networks, and deploying applications becomes a tedious and error-prone process. This led me to build my homelab as Infrastructure as Code (IaC) with Ansible.
This blog post walks you through my Ansible project, which automates the entire lifecycle of my homelab—from provisioning VMs on Proxmox to deploying a production-ready K3s Kubernetes cluster.
## Why Ansible?
When I decided to automate my infrastructure, I considered several tools. I chose Ansible for its simplicity, agentless architecture, and gentle learning curve. Writing playbooks in YAML felt declarative and intuitive, and the vast collection of community-supported modules meant I wouldn't have to reinvent the wheel.
## The Architecture: A Multi-Layered Approach
My Ansible project is designed to be modular and scalable, with a clear separation of concerns. It's built around a collection of roles, each responsible for a specific component of the infrastructure.
### Layer 1: Proxmox Provisioning
The foundation of my homelab is Proxmox VE. The `proxmox` role is the first step in the automation pipeline. It handles:
- **VM and Container Creation:** Using a simple YAML definition in my `vars` files, I can specify the number of VMs and containers to create, their resources (CPU, memory, disk), and their base operating system images (a sketch follows this list).
- **Cloud-Init Integration:** For VMs, I leverage Cloud-Init to perform initial setup, such as setting the hostname, creating users, and injecting SSH keys for Ansible to connect to.
- **Hardware Passthrough:** The role also configures hardware passthrough for devices like Intel Quick Sync for video transcoding in my media server.
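To make Layer 1 concrete, here is a minimal sketch of what such a `vars` definition could look like. The key names (`proxmox_vms`, `cores`, `cloudinit`) are illustrative assumptions, not the role's actual schema:
```yaml
# Hypothetical vars-file entry consumed by the proxmox role.
# Key names are illustrative; the real schema lives in the role's defaults.
proxmox_vms:
  - name: k3s-agent03
    node: aya01                 # Proxmox node that hosts the VM
    cores: 4
    memory_mb: 8192
    disk_gb: 64
    image: debian-12-genericcloud-amd64.qcow2
    cloudinit:
      user: tudattr
      ssh_authorized_keys:
        - "{{ pubkey }}"        # injected so Ansible can connect afterwards
```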
### Layer 2: The K3s Kubernetes Cluster
With the base VMs ready, the next step is to build the Kubernetes cluster. I chose K3s for its lightweight footprint and ease of installation. The setup is divided into several roles (the group variables that drive them are sketched after the list):
- `k3s_server`: This role bootstraps the first master node and then adds additional master nodes to create a highly available control plane.
- `k3s_agent`: This role joins the worker nodes to the cluster.
- `k3s_loadbalancer`: A dedicated VM running Nginx is set up to act as a load balancer for the K3s API server, ensuring a stable endpoint for `kubectl` and other clients.
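These roles are driven by a small set of group variables describing the cluster layout. A trimmed sketch, with addresses matching the repository's vars files but simplified for illustration:
```yaml
# Cluster layout consumed by the k3s_* roles (illustrative sketch).
k3s:
  loadbalancer:
    ip: 192.168.20.22           # stable endpoint for kubectl and agents
    default_port: 6443
  server:
    ips:                        # control-plane nodes
      - 192.168.20.21
      - 192.168.20.24
      - 192.168.20.30
  agent:
    ips:                        # worker nodes
      - 192.168.20.25
      - 192.168.20.26
      - 192.168.20.27
```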
### Layer 3: Applications and Services
Once the Kubernetes cluster is up and running, it's time to deploy applications. My project includes roles for:
- `docker_host`: For services that are better suited to run in a traditional Docker environment, this role sets up and configures Docker hosts.
- `kubernetes_argocd`: I use Argo CD for GitOps-based continuous delivery. This role deploys Argo CD to the cluster and configures it to sync with my application repositories (a minimal app-of-apps sketch follows this list).
- `reverse_proxy`: Caddy is my reverse proxy of choice, and this role automates its installation and configuration, including obtaining SSL certificates from Let's Encrypt.
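The app-of-apps pattern boils down to a single root Argo CD `Application` that points at a directory of child `Application` manifests, so bootstrapping the whole application layer is one `kubectl apply`. A minimal sketch; the repository URL and path here are placeholders, not my actual repo:
```yaml
# Root "app of apps": Argo CD syncs this one Application, which in turn
# creates every child Application manifest found under apps/.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: root
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://codeberg.org/tudattr/homelab-apps.git  # placeholder
    targetRevision: HEAD
    path: apps
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true               # remove resources deleted from Git
      selfHeal: true            # revert manual drift
```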
## Putting It All Together: The Power of Playbooks
The playbooks in the `playbooks/` directory tie everything together. For example, the `kubernetes_setup.yml` playbook runs all the necessary roles in the correct order to bring up the entire Kubernetes cluster from scratch.
```yaml
# playbooks/kubernetes_setup.yml
---
- name: Set up Kubernetes Cluster
  hosts: all
  gather_facts: true
  roles:
    - role: k3s_server
    - role: k3s_agent
    - role: k3s_loadbalancer
    - role: kubernetes_argocd
```
## Final Thoughts and Future Plans
This Ansible project has transformed my homelab from a collection of manually configured machines into a fully automated and reproducible environment. I can now tear down and rebuild my entire infrastructure with a single command, which gives me the confidence to experiment without fear of breaking things.
While the project is highly tailored to my specific needs, I hope this overview provides some inspiration for your own automation journey. The principles of IaC and the power of tools like Ansible can be applied to any environment, big or small.
What's next? I plan to explore more advanced Kubernetes concepts, such as Cilium for networking and policy, and integrate more of my self-hosted services into the GitOps workflow with Argo CD. The homelab is never truly "finished," and that's what makes it so much fun.


@@ -1,10 +0,0 @@
---
- name: Run the common role on k3s
  hosts: k3s
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common

db.yml

@@ -1,19 +0,0 @@
---
- name: Set up Servers
  hosts: db
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: postgres
      tags:
        - postgres
    - role: node_exporter
      tags:
        - node_exporter
    - role: postgres_exporter
      tags:
        - postgres_exporter


@@ -1,36 +0,0 @@
#
# Essential
#
root: root
user: tudattr
timezone: Europe/Berlin
puid: "1000"
pgid: "1000"
pk_path: "/media/veracrypt1/genesis"
pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqc9fnzfCz8fQDFzla+D8PBhvaMmFu2aF+TYkkZRxl9 tuan@genesis-2022-01-20"
public_domain: tudattr.dev
internal_domain: seyshiro.de
#
# Packages
#
common_packages:
- build-essential
- curl
- git
- iperf3
- neovim
- rsync
- smartmontools
- sudo
- systemd-timesyncd
- tree
- screen
- bat
- fd-find
- ripgrep
arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"


@@ -1,548 +0,0 @@
docker:
url: "https://download.docker.com/linux"
apt_release_channel: "stable"
directories:
opt: "/opt/docker/"
compose: "/opt/docker/compose"
caddy:
admin_email: me+acme@tudattr.dev
domain: "seyshiro.de"
services:
- name: syncthing
vm:
- docker-host00
container_name: syncthing
image: syncthing/syncthing
restart: unless-stopped
volumes:
- name: "Data"
internal: /var/syncthing/
external: /media/docker/data/syncthing/
ports:
- name: "http"
internal: 8384
external: 8384
- name: ""
internal: 22000
external: 22000
- name: ""
internal: 22000
external: 22000
- name: ""
internal: 21027
external: 21027
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: status
vm:
- docker-host00
container_name: kuma
image: louislam/uptime-kuma:1
restart: unless-stopped
volumes:
- name: "Data"
internal: /app/data
external: /opt/local/kuma/
ports:
- name: "http"
internal: 3001
external: 3001
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: plex
vm:
- docker-host00
container_name: plex
image: lscr.io/linuxserver/plex:latest
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/plex/config/
- name: "TV Series"
internal: /tv:ro
external: /media/series
- name: "Movies"
internal: /movies:ro
external: /media/movies
- name: "Music"
internal: /music:ro
external: /media/songs
devices:
- name: "Graphics Card"
internal: /dev/dri
external: /dev/dri
ports:
- name: "http"
internal: 32400
external: 32400
- name: ""
internal: 1900
external: 1900
- name: ""
internal: 3005
external: 3005
- name: ""
internal: 5353
external: 5353
- name: ""
internal: 32410
external: 32410
- name: ""
internal: 8324
external: 8324
- name: ""
internal: 32412
external: 32412
- name: ""
internal: 32469
external: 32469
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- VERSION=docker
- name: jellyfin
vm:
- docker-host02
container_name: jellyfin
image: jellyfin/jellyfin
restart: "unless-stopped"
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/jellyfin/config
- name: "Cache"
internal: /cache
external: /opt/docker/config/jellyfin/cache
- name: "Tv Series"
internal: /tv:ro
external: /media/series
- name: "Music"
internal: /movies:ro
external: /media/movies
- name: "Music"
internal: /music:ro
external: /media/songs
devices:
- name: "Graphics Card"
internal: /dev/dri
external: /dev/dri
ports:
- name: "http"
internal: 8096
external: 8096
environment:
- name: hass
vm:
- docker-host02
container_name: homeassistant
image: "ghcr.io/home-assistant/home-assistant:stable"
restart: unless-stopped
privileged: true
volumes:
- name: "Configuration"
internal: /config/
external: /opt/local/home-assistant/config/
- name: "Local Time"
internal: /etc/localtime:ro
external: /etc/localtime
ports:
- name: "http"
internal: 8123
external: 8123
- name: ""
internal: 4357
external: 4357
- name: ""
internal: 5683
external: 5683
- name: ""
internal: 5683
external: 5683
- name: ddns
vm:
- docker-host00
container_name: ddns-updater
image: ghcr.io/qdm12/ddns-updater
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /updater/data/
external: /opt/docker/config/ddns-updater/data/
ports:
- name: "http"
internal: 8000
external: 8001
- name: sonarr
vm:
- docker-host00
container_name: sonarr
image: lscr.io/linuxserver/sonarr:latest
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/sonarr/config
- name: "Tv Series"
internal: /tv
external: /media/series
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/sonarr
ports:
- name: "http"
internal: 8989
external: 8989
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: radarr
vm:
- docker-host00
container_name: radarr
image: lscr.io/linuxserver/radarr:latest
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/radarr/config
- name: "Movies"
internal: /movies
external: /media/movies
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/radarr
ports:
- name: "http"
internal: 7878
external: 7878
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: lidarr
vm:
- docker-host00
container_name: lidarr
image: lscr.io/linuxserver/lidarr:latest
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/lidarr/config
- name: "Music"
internal: /music
external: /media/songs
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/lidarr
ports:
- name: "http"
internal: 8686
external: 8686
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: prowlarr
vm:
- docker-host00
container_name: prowlarr
image: lscr.io/linuxserver/prowlarr:latest
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /config
external: /opt/local/prowlarr/config
ports:
- name: "http"
internal: 9696
external: 9696
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: paperless
vm:
- docker-host00
container_name: paperless
image: ghcr.io/paperless-ngx/paperless-ngx:latest
restart: unless-stopped
depends_on:
- paperless-postgres
- paperless-broker
volumes:
- name: "Configuration"
internal: /usr/src/paperless/data
external: /opt/local/paperless/data/data
- name: "Media"
internal: /usr/src/paperless/media
external: /opt/local/paperless/data/media
- name: "Document Export"
internal: /usr/src/paperless/export
external: /opt/local/paperless/data/export
- name: "Document Consume"
internal: /usr/src/paperless/consume
external: /opt/local/paperless/data/consume
environment:
- "PAPERLESS_REDIS=redis://paperless-broker:6379"
- "PAPERLESS_DBHOST=paperless-postgres"
- "PAPERLESS_DBUSER=paperless"
- "PAPERLESS_DBPASS={{ vault.docker.paperless.dbpass }}"
- "USERMAP_UID=1000"
- "USERMAP_GID=1000"
- "PAPERLESS_URL=https://paperless.{{ domain }}"
- "PAPERLESS_TIME_ZONE=Europe/Berlin"
- "PAPERLESS_OCR_LANGUAGE=deu"
ports:
- name: "http"
internal: 8000
external: 8000
- name: pdf
vm:
- docker-host00
container_name: stirling
image: frooodle/s-pdf:latest
restart: unless-stopped
ports:
- name: "http"
internal: 8080
external: 8080
- name: git
vm:
- docker-host02
container_name: gitea
image: gitea/gitea:1.23.1-rootless
restart: unless-stopped
volumes:
- name: "Configuration"
internal: /etc/gitea
external: /opt/local/gitea/config
- name: "Data"
internal: /var/lib/gitea
external: /opt/local/gitea/data
- name: "Time Zone"
internal: /etc/timezone:ro
external: /etc/timezone
- name: "Local Time"
internal: /etc/localtime:ro
external: /etc/localtime
ports:
- name: "http"
internal: 3000
external: 3000
- name: "ssh"
internal: 2222
external: 2222
environment:
- USER_UID=1000
- USER_GID=1000
- name: changedetection
vm:
- docker-host00
container_name: changedetection
image: dgtlmoon/changedetection.io
restart: unless-stopped
volumes:
- name: "Data"
internal: /datastore
external: /opt/docker/config/changedetection/data/
ports:
- name: "http"
internal: 5000
external: 5000
- name: gluetun
vm:
- docker-host00
container_name: gluetun
image: qmcgaw/gluetun
restart: unless-stopped
cap_add:
- NET_ADMIN
devices:
- name: "Tunnel"
internal: /dev/net/tun
external: /dev/net/tun
volumes:
- name: "Configuration"
internal: /gluetun
external: /opt/docker/config/gluetun/config
ports:
- name: "Qbit Client"
internal: 8082
external: 8082
- name: "Torrentleech Client"
internal: 8083
external: 8083
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- VPN_SERVICE_PROVIDER=protonvpn
- UPDATER_VPN_SERVICE_PROVIDERS=protonvpn
- UPDATER_PERIOD=24h
- "SERVER_COUNTRIES={{ vault.docker.proton.country }}"
- "OPENVPN_USER={{ vault.docker.proton.openvpn_user }}"
- "OPENVPN_PASSWORD={{ vault.docker.proton.openvpn_password }}"
- name: torrentleech
vm:
- docker-host00
container_name: torrentleech
image: qbittorrentofficial/qbittorrent-nox
restart: unless-stopped
depends_on:
- gluetun
network_mode: "container:gluetun"
volumes:
- name: "Configuration"
internal: /config
external: /opt/docker/config/torrentleech/config
- name: "Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads
ports:
- name: "http"
internal: proxy_only
external: 8083
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- QBT_EULA="accept"
- QBT_WEBUI_PORT="8083"
- name: qbit
vm:
- docker-host00
container_name: qbit
image: qbittorrentofficial/qbittorrent-nox
restart: unless-stopped
depends_on:
- gluetun
network_mode: "container:gluetun"
volumes:
- name: "Configuration"
internal: /config
external: /opt/docker/config/qbit/config
- name: "Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads
ports:
- name: "http"
internal: proxy_only
external: 8082
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- QBT_EULA="accept"
- QBT_WEBUI_PORT="8082"
- name: cadvisor
vm:
- docker-host00
- docker-host01
- docker-host02
container_name: cadvisor
image: gcr.io/cadvisor/cadvisor:latest
restart: unless-stopped
ports:
- name: ""
internal: 8080
external: 8081
volumes:
- name: "Root"
internal: /rootfs:ro
external: /
- name: "Run"
internal: /var/run:rw
external: /var/run
- name: "System"
internal: /sys:ro
external: /sys
- name: "Docker"
internal: /var/lib/docker:ro
external: /var/lib/docker
# - name: template
# vm:
# -
# container_name:
# image:
# restart:
# volumes:
# - name:
# internal:
# external:
# ports:
# - name:
# internal:
# external:
# environment:
# -
# - name: calibre
# vm:
# - docker-host00
# container_name: calibre
# image: lscr.io/linuxserver/calibre-web:latest
# restart: unless-stopped
# volumes:
# - name: "Configuration"
# internal: /config
# external: /opt/local/calibre/
# - name: "Books"
# internal: /books
# external: /media/docker/data/calibre/
# ports:
# - name: "http"
# internal: 5000
# external: 5000
# environment:
# - PUID=1000
# - PGID=1000
# - TZ=Europe/Berlin
# - DOCKER_MODS=linuxserver/mods:universal-calibre
# - name: grafana
# vm:
# container_name: grafana
# image: grafana/grafana-oss
# restart: unless-stopped
# volumes:
# - name: "Configuration"
# internal: /etc/grafana/
# external: /opt/docker/config/grafana/config/
# - name: "Data"
# internal: /var/lib/grafana/
# external: /media/docker/data/grafana/
# ports:
# environment:
# - PUID=472
# - PGID=472
# - TZ=Europe/Berlin
# - name: prometheus
# vm:
# - docker-host00
# container_name: prometheus
# image: prom/prometheus
# restart: unless-stopped
# volumes:
# - name: "Configuration"
# internal: /etc/prometheus/
# external: /opt/docker/config/prometheus/
# - name: "Data"
# internal: /prometheus/
# external: prometheus_data
# ports:
# - name: "http"
# internal: 5000
# external: 5000
# environment:
# - PUID=65534
# - PGID=65534
# - TZ=Europe/Berlin


@@ -1,28 +0,0 @@
db:
  default_user:
    user: "postgres"
  name: "k3s"
  user: "k3s"
  password: "{{ vault.k3s.postgres.db.password }}"
  listen_address: "{{ k3s.db.ip }}"
k3s:
  net: "192.168.20.0/24"
  server:
    ips:
      - 192.168.20.21
      - 192.168.20.24
      - 192.168.20.30
  loadbalancer:
    ip: 192.168.20.22
    default_port: 6443
  db:
    ip: 192.168.20.23
    default_port: "5432"
  agent:
    ips:
      - 192.168.20.25
      - 192.168.20.26
      - 192.168.20.27
k3s_db_connection_string: "postgres://{{ db.user }}:{{ db.password }}@{{ k3s.db.ip }}:{{ k3s.db.default_port }}/{{ db.name }}"


@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.12
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.aya01.root.sudo }}"
host:
  hostname: "aya01"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.34
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host00.sudo }}"
host:
  hostname: "docker-host00"
  ip: "{{ ansible_host }}"


@@ -1,26 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.35
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host01.sudo }}"
host:
  hostname: "docker-host01"
  ip: "{{ ansible_host }}"
enable_nginx: true
enable_syncthing: true
enable_kuma: true
enable_plex: true
enable_arr: true
enable_prometheus: false
enable_grafana: false
enable_ddns_updater: true
enable_homeassistant: false
enable_stirling: true
enable_jellyfin: false
enable_paperless: true
enable_gitea: false
enable_changedetection: true
enable_calibre: false


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.36
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host02.sudo }}"
host:
  hostname: "docker-host02"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.37
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.lb.sudo }}"
host:
  hostname: "docker-lb"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.14
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.inko.root.sudo }}"
host:
  hostname: "inko"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.25
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent00.sudo }}"
host:
  hostname: "k3s-agent00"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.26
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent01.sudo }}"
host:
  hostname: "k3s-agent01"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.27
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent02.sudo }}"
host:
  hostname: "k3s-agent02"
  ip: "{{ ansible_host }}"


@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.22
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.loadbalancer.sudo }}"
host:
  hostname: "k3s-loadbalancer"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.32
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn00.sudo }}"
host:
  hostname: "k3s-longhorn00"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.33
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn01.sudo }}"
host:
  hostname: "k3s-longhorn01"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.31
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn02.sudo }}"
host:
  hostname: "k3s-longhorn02"
  ip: "{{ ansible_host }}"


@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.23
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.postgres.sudo }}"
host:
  hostname: "k3s-postgres"
  ip: "{{ ansible_host }}"


@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.21
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server00.sudo }}"
host:
  hostname: "k3s-server00"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.24
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
host:
  hostname: "k3s-server01"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.30
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server02.sudo }}"
host:
  hostname: "k3s-server02"
  ip: "{{ ansible_host }}"


@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.28
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.lulu.root.sudo }}"
host:
  hostname: "lulu"
  ip: "{{ ansible_host }}"


@@ -1,31 +0,0 @@
- name: Set up Agents
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true
    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - common
    - role: k3s_agent
      when: inventory_hostname in groups["k3s_agent"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_agent
    - role: node_exporter
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - node_exporter


@@ -1,16 +0,0 @@
---
- name: Set up Servers
  hosts: k3s_server
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: k3s_server
      tags:
        - k3s_server
    - role: node_exporter
      tags:
        - node_exporter


@@ -1,31 +0,0 @@
- name: Set up storage
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true
    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_storage"]
      tags:
        - common
    - role: k3s_storage
      when: inventory_hostname in groups["k3s_storage"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_storage
    - role: node_exporter
      when: inventory_hostname in groups["k3s_storage"]
      tags:
        - node_exporter


@@ -1,16 +0,0 @@
---
- name: Set up Servers
  hosts: loadbalancer
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: loadbalancer
      tags:
        - loadbalancer
    - role: node_exporter
      tags:
        - node_exporter


@@ -1,13 +1,11 @@
---
- name: Set up Servers
hosts: docker_host
gather_facts: yes
vars_files:
- secrets.yml
gather_facts: true
roles:
- role: common
tags:
- common
# - role: common
# tags:
# - common
- role: docker_host
tags:
- docker_host


@@ -1,13 +1,13 @@
---
- name: Set up reverse proxy for docker
hosts: docker_lb
gather_facts: yes
vars_files:
- secrets.yml
hosts: docker
gather_facts: true
roles:
- role: common
tags:
- common
when: inventory_hostname in groups["docker_lb"]
- role: reverse_proxy
tags:
- reverse_proxy
when: inventory_hostname in groups["docker_lb"]

playbooks/docker.yaml

@@ -0,0 +1,5 @@
---
- name: Setup Docker Hosts
  ansible.builtin.import_playbook: docker-host.yaml
- name: Setup Docker load balancer
  ansible.builtin.import_playbook: docker-lb.yaml

playbooks/k3s-agents.yaml

@@ -0,0 +1,16 @@
- name: Set up Agents
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - common
    - role: k3s_agent
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - k3s_agent
    # - role: node_exporter
    #   when: inventory_hostname in groups["k3s_agent"]
    #   tags:
    #     - node_exporter


@@ -0,0 +1,17 @@
---
- name: Set up Servers
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["k3s_loadbalancer"]
    - role: k3s_loadbalancer
      tags:
        - k3s_loadbalancer
      when: inventory_hostname in groups["k3s_loadbalancer"]
    # - role: node_exporter
    #   tags:
    #     - node_exporter
    #   when: inventory_hostname in groups["k3s_loadbalancer"]


@@ -0,0 +1,17 @@
---
- name: Set up Servers
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["k3s_server"]
    - role: k3s_server
      tags:
        - k3s_server
      when: inventory_hostname in groups["k3s_server"]
    # - role: node_exporter
    #   tags:
    #     - node_exporter
    #   when: inventory_hostname in groups["k3s_server"]


@@ -0,0 +1,16 @@
- name: Set up storage
  hosts: k3s_nodes
  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_storage"]
      tags:
        - common
    - role: k3s_storage
      when: inventory_hostname in groups["k3s_storage"]
      tags:
        - k3s_storage
    # - role: node_exporter
    #   when: inventory_hostname in groups["k3s_storage"]
    #   tags:
    #     - node_exporter


@@ -0,0 +1,10 @@
---
- name: Setup Kubernetes Cluster
  hosts: kubernetes
  any_errors_fatal: true
  gather_facts: false
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
  roles:
    - role: kubernetes_argocd
      when: is_localhost


@@ -0,0 +1,6 @@
---
- name: Create new VM(s)
  ansible.builtin.import_playbook: proxmox.yaml
- name: Provision VM
  ansible.builtin.import_playbook: k3s-agents.yaml

playbooks/proxmox.yaml

@@ -0,0 +1,15 @@
---
- name: Run proxmox vm playbook
  hosts: proxmox
  gather_facts: true
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
    is_proxmox_node: "{{ 'proxmox_nodes' in group_names }}"
  roles:
    - role: common
      tags:
        - common
      when: not is_localhost
    - role: proxmox
      tags:
        - proxmox


@@ -1,84 +0,0 @@
[vps]
mii
[k3s]
k3s-postgres
k3s-loadbalancer
k3s-server00
k3s-server01
k3s-server02
k3s-agent00
k3s-agent01
k3s-agent02
k3s-longhorn00
k3s-longhorn01
k3s-longhorn02
[k3s_server]
k3s-server00
k3s-server01
k3s-server02
[k3s_agent]
k3s-agent00
k3s-agent01
k3s-agent02
[k3s_storage]
k3s-longhorn00
k3s-longhorn01
k3s-longhorn02
[vm]
k3s-agent00
k3s-agent01
k3s-agent02
k3s-server00
k3s-server01
k3s-server02
k3s-postgres
k3s-loadbalancer
k3s-longhorn00
k3s-longhorn01
k3s-longhorn02
docker-host00
docker-host02
[k3s_nodes]
k3s-server00
k3s-server01
k3s-server02
k3s-agent00
k3s-agent01
k3s-agent02
k3s-longhorn00
k3s-longhorn01
k3s-longhorn02
[db]
k3s-postgres
[loadbalancer]
k3s-loadbalancer
[vm:vars]
ansible_ssh_common_args='-o ProxyCommand="ssh -p 22 -W %h:%p -q aya01"'
[docker]
docker-host00
docker-host01
docker-host02
docker-lb
[docker_host]
docker-host00
docker-host01
docker-host02
[docker_lb]
docker-lb
[proxmox]
aya01
lulu
inko

requirements.txt

@@ -0,0 +1,28 @@
cachetools==5.5.2
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
distlib==0.4.0
durationpy==0.10
filelock==3.18.0
google-auth==2.40.3
identify==2.6.12
idna==3.10
kubernetes==33.1.0
nc-dnsapi==0.1.3
nodeenv==1.9.1
oauthlib==3.3.1
platformdirs==4.3.8
pre_commit==4.2.0
proxmoxer==2.2.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9.1
six==1.17.0
urllib3==2.3.0
virtualenv==20.32.0
websocket-client==1.8.0

requirements.yaml

@@ -0,0 +1,5 @@
---
collections:
- name: community.docker
- name: community.general
- name: kubernetes.core

roles/common/README.md

@@ -0,0 +1,49 @@
# Ansible Role: common
This role applies a baseline configuration to Debian-based systems.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `vars/main.yml`):
```yaml
# The hostname to configure.
hostname: "new-host"
# A list of extra packages to install.
extra_packages:
- "htop"
- "ncdu"
- "stow"
- "unzip"
```
## Dependencies
None.
## Example Playbook
An example of how to use this role, with variables passed in as parameters:
```yaml
- hosts: servers
  roles:
    - role: common
      hostname: "my-new-host"
      extra_packages:
        - "vim"
        - "curl"
```
## License
MIT
## Author Information
This role was created in 2025 by [TuDatTr](https://codeberg.org/tudattr/).


@@ -0,0 +1,80 @@
xterm-ghostty|ghostty|Ghostty,
am, bce, ccc, hs, km, mc5i, mir, msgr, npc, xenl, AX, Su, Tc, XT, fullkbd,
colors#0x100, cols#80, it#8, lines#24, pairs#0x7fff,
acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, civis=\E[?25l,
clear=\E[H\E[2J, cnorm=\E[?12l\E[?25h, cr=\r,
csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
cud=\E[%p1%dB, cud1=\n, cuf=\E[%p1%dC, cuf1=\E[C,
cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
cvvis=\E[?12;25h, dch=\E[%p1%dP, dch1=\E[P, dim=\E[2m,
dl=\E[%p1%dM, dl1=\E[M, dsl=\E]2;\007, ech=\E[%p1%dX,
ed=\E[J, el=\E[K, el1=\E[1K, flash=\E[?5h$<100/>\E[?5l,
fsl=^G, home=\E[H, hpa=\E[%i%p1%dG, ht=^I, hts=\EH,
ich=\E[%p1%d@, ich1=\E[@, il=\E[%p1%dL, il1=\E[L, ind=\n,
indn=\E[%p1%dS,
initc=\E]4;%p1%d;rgb:%p2%{255}%*%{1000}%/%2.2X/%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\E\\,
invis=\E[8m, kDC=\E[3;2~, kEND=\E[1;2F, kHOM=\E[1;2H,
kIC=\E[2;2~, kLFT=\E[1;2D, kNXT=\E[6;2~, kPRV=\E[5;2~,
kRIT=\E[1;2C, kbs=^?, kcbt=\E[Z, kcub1=\EOD, kcud1=\EOB,
kcuf1=\EOC, kcuu1=\EOA, kdch1=\E[3~, kend=\EOF, kent=\EOM,
kf1=\EOP, kf10=\E[21~, kf11=\E[23~, kf12=\E[24~,
kf13=\E[1;2P, kf14=\E[1;2Q, kf15=\E[1;2R, kf16=\E[1;2S,
kf17=\E[15;2~, kf18=\E[17;2~, kf19=\E[18;2~, kf2=\EOQ,
kf20=\E[19;2~, kf21=\E[20;2~, kf22=\E[21;2~,
kf23=\E[23;2~, kf24=\E[24;2~, kf25=\E[1;5P, kf26=\E[1;5Q,
kf27=\E[1;5R, kf28=\E[1;5S, kf29=\E[15;5~, kf3=\EOR,
kf30=\E[17;5~, kf31=\E[18;5~, kf32=\E[19;5~,
kf33=\E[20;5~, kf34=\E[21;5~, kf35=\E[23;5~,
kf36=\E[24;5~, kf37=\E[1;6P, kf38=\E[1;6Q, kf39=\E[1;6R,
kf4=\EOS, kf40=\E[1;6S, kf41=\E[15;6~, kf42=\E[17;6~,
kf43=\E[18;6~, kf44=\E[19;6~, kf45=\E[20;6~,
kf46=\E[21;6~, kf47=\E[23;6~, kf48=\E[24;6~,
kf49=\E[1;3P, kf5=\E[15~, kf50=\E[1;3Q, kf51=\E[1;3R,
kf52=\E[1;3S, kf53=\E[15;3~, kf54=\E[17;3~,
kf55=\E[18;3~, kf56=\E[19;3~, kf57=\E[20;3~,
kf58=\E[21;3~, kf59=\E[23;3~, kf6=\E[17~, kf60=\E[24;3~,
kf61=\E[1;4P, kf62=\E[1;4Q, kf63=\E[1;4R, kf7=\E[18~,
kf8=\E[19~, kf9=\E[20~, khome=\EOH, kich1=\E[2~,
kind=\E[1;2B, kmous=\E[<, knp=\E[6~, kpp=\E[5~,
kri=\E[1;2A, oc=\E]104\007, op=\E[39;49m, rc=\E8,
rep=%p1%c\E[%p2%{1}%-%db, rev=\E[7m, ri=\EM,
rin=\E[%p1%dT, ritm=\E[23m, rmacs=\E(B, rmam=\E[?7l,
rmcup=\E[?1049l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
rmul=\E[24m, rs1=\E]\E\\\Ec, sc=\E7,
setab=\E[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m,
setaf=\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m,
sgr=%?%p9%t\E(0%e\E(B%;\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m,
sgr0=\E(B\E[m, sitm=\E[3m, smacs=\E(0, smam=\E[?7h,
smcup=\E[?1049h, smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m,
smul=\E[4m, tbc=\E[3g, tsl=\E]2;, u6=\E[%i%d;%dR, u7=\E[6n,
u8=\E[?%[;0123456789]c, u9=\E[c, vpa=\E[%i%p1%dd,
BD=\E[?2004l, BE=\E[?2004h, Clmg=\E[s,
Cmg=\E[%i%p1%d;%p2%ds, Dsmg=\E[?69l, E3=\E[3J,
Enmg=\E[?69h, Ms=\E]52;%p1%s;%p2%s\007, PE=\E[201~,
PS=\E[200~, RV=\E[>c, Se=\E[2 q,
Setulc=\E[58:2::%p1%{65536}%/%d:%p1%{256}%/%{255}%&%d:%p1%{255}%&%d%;m,
Smulx=\E[4:%p1%dm, Ss=\E[%p1%d q,
Sync=\E[?2026%?%p1%{1}%-%tl%eh%;,
XM=\E[?1006;1000%?%p1%{1}%=%th%el%;, XR=\E[>0q,
fd=\E[?1004l, fe=\E[?1004h, kDC3=\E[3;3~, kDC4=\E[3;4~,
kDC5=\E[3;5~, kDC6=\E[3;6~, kDC7=\E[3;7~, kDN=\E[1;2B,
kDN3=\E[1;3B, kDN4=\E[1;4B, kDN5=\E[1;5B, kDN6=\E[1;6B,
kDN7=\E[1;7B, kEND3=\E[1;3F, kEND4=\E[1;4F,
kEND5=\E[1;5F, kEND6=\E[1;6F, kEND7=\E[1;7F,
kHOM3=\E[1;3H, kHOM4=\E[1;4H, kHOM5=\E[1;5H,
kHOM6=\E[1;6H, kHOM7=\E[1;7H, kIC3=\E[2;3~, kIC4=\E[2;4~,
kIC5=\E[2;5~, kIC6=\E[2;6~, kIC7=\E[2;7~, kLFT3=\E[1;3D,
kLFT4=\E[1;4D, kLFT5=\E[1;5D, kLFT6=\E[1;6D,
kLFT7=\E[1;7D, kNXT3=\E[6;3~, kNXT4=\E[6;4~,
kNXT5=\E[6;5~, kNXT6=\E[6;6~, kNXT7=\E[6;7~,
kPRV3=\E[5;3~, kPRV4=\E[5;4~, kPRV5=\E[5;5~,
kPRV6=\E[5;6~, kPRV7=\E[5;7~, kRIT3=\E[1;3C,
kRIT4=\E[1;4C, kRIT5=\E[1;5C, kRIT6=\E[1;6C,
kRIT7=\E[1;7C, kUP=\E[1;2A, kUP3=\E[1;3A, kUP4=\E[1;4A,
kUP5=\E[1;5A, kUP6=\E[1;6A, kUP7=\E[1;7A, kxIN=\E[I,
kxOUT=\E[O, rmxx=\E[29m, rv=\E\\[[0-9]+;[0-9]+;[0-9]+c,
setrgbb=\E[48:2:%p1%d:%p2%d:%p3%dm,
setrgbf=\E[38:2:%p1%d:%p2%d:%p3%dm, smxx=\E[9m,
xm=\E[<%i%p3%d;%p1%d;%p2%d;%?%p4%tM%em%;,
xr=\EP>\\|[ -~]+a\E\\,


@@ -0,0 +1,18 @@
Protocol 2
PermitRootLogin yes
MaxAuthTries 3
PubkeyAuthentication yes
PasswordAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
AllowAgentForwarding no
AllowTcpForwarding yes
X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server


@@ -1,4 +1,3 @@
Include /etc/ssh/sshd_config.d/*.conf
Protocol 2
PermitRootLogin no
MaxAuthTries 3
@@ -13,6 +12,7 @@ X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server


@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxIbkko72kVSfYDjJpiMH9SjHUGqBn3MbBvmotsPQhybFgnnkBpX/3fM9olP+Z6PGsmbOEs0fOjPS6uY5hjKcKsyHdZfS6cA4wjY/DL8fwATAW5FCDBtMpdg2/sb8j9jutHHs4sQeRBolVwKcv+ZAaJNnOzNHwxVUfT9bNwShthnAFjkY7oZo657FRomlkDJjmGQuratP0veKA8jYzqqPWwWidTGQerLYTyJ3Z8pbQa5eN7svrvabjjDLbVTDESE8st9WEmwvAwoj7Kz+WovCy0Uz7LRFVmaRiapM8SXtPPUC0xfyzAB3NxwBtxizdUMlShvLcL6cujcUBMulVMpsqEaOESTpmVTrMJhnJPZG/3j9ziGoYIa6hMj1J9/qLQ5dDNVVXMxw99G31x0LJoy12IE90P4Cahux8iN0Cp4oB4+B6/qledxs1fcRzsnQY/ickjKhqcJwgHzsnwjDkeYRaYte5x4f/gJ77kA20nPto7mxr2mhWot/i9B1KlMURVXOH/q4nrzhJ0hPJpM0UtzQ58TmzE4Osf/B5yoe8V//6XnelbmG/nKCIzg12d7PvaLjbFMn8IgOwDMRlip+vpyadRr/+pCawrfo4vLF7BsnJ84aoByIpbwaysgaYHtjfZWImorMVkgviC4O6Hn9/ZiLNze2A9DaNUnLVJ0nYNbmv9Q==


@@ -3,4 +3,4 @@
service:
name: sshd
state: restarted
become: yes
become: true


@@ -0,0 +1,24 @@
---
- name: Copy bash-configs
  ansible.builtin.template:
    src: "files/bash/{{ item }}"
    dest: "{{ ansible_env.HOME }}/.{{ item }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
  loop:
    - bashrc
    - bash_aliases
- name: Copy ghostty infocmp
  ansible.builtin.copy:
    src: files/ghostty/infocmp
    dest: "{{ ansible_env.HOME }}/ghostty"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "0644"
  register: ghostty_terminfo
- name: Compile ghostty terminfo
  ansible.builtin.command: "tic -x {{ ansible_env.HOME }}/ghostty"
  when: ghostty_terminfo.changed


@@ -1,12 +0,0 @@
---
- name: Copy bash-configs
  ansible.builtin.template:
    src: "files/bash/{{ item }}"
    dest: "/home/{{ user }}/.{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    mode: "644"
  loop:
    - bashrc
    - bash_aliases
  become: true


@@ -11,7 +11,6 @@
url: https://raw.githubusercontent.com/eza-community/eza/main/deb.asc
dest: /etc/apt/keyrings/gierens.asc
mode: "0644"
register: gpg_key_result
become: true
- name: Add Gierens repository to apt sources
@@ -80,12 +79,13 @@
path: ~/.config/nvim
register: nvim_config
- name: Clone LazyVim starter to Neovim config directory
- name: Clone personal Neovim config directory
ansible.builtin.git:
repo: https://github.com/LazyVim/starter
repo: https://codeberg.org/tudattr/nvim
dest: ~/.config/nvim
clone: true
update: false
version: 1.0.0
when: not nvim_config.stat.exists
- name: Remove .git directory from Neovim config


@@ -1,14 +1,14 @@
---
- name: Set a hostname
ansible.builtin.hostname:
name: "{{ host.hostname }}"
name: "{{ inventory_hostname }}"
become: true
- name: Update /etc/hosts to reflect the new hostname
ansible.builtin.lineinfile:
path: /etc/hosts
regexp: '^127\.0\.1\.1'
line: "127.0.1.1 {{ host.hostname }}"
line: "127.0.1.1 {{ inventory_hostname }}"
state: present
backup: true
become: true


@@ -0,0 +1,13 @@
---
- name: Configure Time
  ansible.builtin.include_tasks: time.yaml
- name: Configure Packages
  ansible.builtin.include_tasks: packages.yaml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yaml
- name: Configure Extra-Packages
  ansible.builtin.include_tasks: extra_packages.yaml
- name: Configure Bash
  ansible.builtin.include_tasks: bash.yaml
- name: Configure SSH
  ansible.builtin.include_tasks: sshd.yaml


@@ -1,13 +0,0 @@
---
- name: Configure Time
  ansible.builtin.include_tasks: time.yml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yml
- name: Configure Packages
  ansible.builtin.include_tasks: packages.yml
- name: Configure Extra-Packages
  ansible.builtin.include_tasks: extra_packages.yml
- name: Configure Bash
  ansible.builtin.include_tasks: bash.yml
- name: Configure SSH
  ansible.builtin.include_tasks: sshd.yml


@@ -0,0 +1,28 @@
---
- name: Update and upgrade packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: true
    autoremove: true
  become: true
  when: ansible_user_id != "root"
- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  become: true
  when: ansible_user_id != "root"
- name: Update and upgrade packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: true
    autoremove: true
  when: ansible_user_id == "root"
- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  when: ansible_user_id == "root"


@@ -1,13 +0,0 @@
---
- name: Update and upgrade packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: true
    autoremove: true
  become: true
- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  become: true


@@ -0,0 +1,28 @@
---
- name: Copy user sshd_config
  ansible.builtin.template:
    src: files/ssh/user/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  become: true
  when: ansible_user_id != "root"
- name: Copy root sshd_config
  ansible.builtin.template:
    src: files/ssh/root/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  when: ansible_user_id == "root"
- name: Copy pubkey
  ansible.builtin.copy:
    src: files/ssh/vault-ca.pub
    dest: "/etc/ssh/vault-ca.pub"
    mode: "644"
  become: true


@@ -1,17 +0,0 @@
---
- name: Copy sshd_config
  ansible.builtin.template:
    src: templates/ssh/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
  notify:
    - Restart sshd
  become: true
- name: Copy pubkey
  ansible.builtin.copy:
    content: "{{ pubkey }}"
    dest: "/home/{{ user }}/.ssh/authorized_keys"
    owner: "{{ user }}"
    group: "{{ user }}"
    mode: "644"


@@ -0,0 +1,11 @@
---
- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  become: true
  when: ansible_user_id != "root"
- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  when: ansible_user_id == "root"


@@ -1,4 +0,0 @@
---
- name: Set timezone to "{{ timezone }}"
  community.general.timezone:
    name: "{{ timezone }}"


@@ -0,0 +1,18 @@
common_packages:
- build-essential
- curl
- git
- iperf3
- neovim
- rsync
- smartmontools
- sudo
- systemd-timesyncd
- tree
- screen
- bat
- fd-find
- ripgrep
- nfs-common
- open-iscsi
- parted


@@ -0,0 +1,85 @@
# Ansible Role: Docker Host
This role sets up a Docker host, installs Docker, and configures it according to the provided variables. It also handles user and group management, directory setup, and deployment of Docker Compose services.
## Role Variables
### General
- `docker_host_package_common_dependencies`: A list of common packages to be installed on the host.
- Default: `nfs-common`, `firmware-misc-nonfree`, `linux-image-amd64`
- `apt_lock_files`: A list of apt lock files to check.
- `arch`: The architecture of the host.
- Default: `arm64` if `ansible_architecture` is `aarch64`, otherwise `amd64`.
### Docker
- `docker.url`: The URL for the Docker repository.
- Default: `https://download.docker.com/linux`
- `docker.apt_release_channel`: The Docker apt release channel.
- Default: `stable`
- `docker.directories.local`: The local directory for Docker data.
- Default: `/opt/local`
- `docker.directories.config`: The directory for Docker configurations.
- Default: `/opt/config`
- `docker.directories.compose`: The directory for Docker Compose files.
- Default: `/opt/compose`
### Keycloak
- `keycloak_config`: A dictionary containing the Keycloak configuration. See `templates/keycloak/realm.json.j2` for more details.
### Services
- `services`: A list of dictionaries, where each dictionary represents a Docker Compose service. See `templates/compose.yaml.j2` for more details; an example entry is shown below.
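For reference, a single `services` entry has this shape (mirroring the repository's vars files; values are examples):
```yaml
services:
  - name: syncthing
    vm:
      - docker-host00           # hosts this service is rendered on
    container_name: syncthing
    image: syncthing/syncthing
    restart: unless-stopped
    volumes:
      - name: "Data"
        internal: /var/syncthing/
        external: /media/docker/data/syncthing/
    ports:
      - name: "http"            # a port named "http" also drives the healthcheck
        internal: 8384
        external: 8384
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Berlin
```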
## Tasks
The role performs the following tasks:
1. **Setup VM**:
- Includes `non-free` and `non-free-firmware` components in the apt sources.
- Installs common packages.
- Removes cloud kernel packages.
- Reboots the host.
2. **Install Docker**:
- Uninstalls old Docker versions.
- Installs dependencies for using repositories over HTTPS.
- Adds the Docker apt key and repository.
- Installs Docker Engine, containerd, and Docker Compose.
3. **Setup user and group for Docker**:
- Ensures the `docker` group exists.
- Adds the `ansible_user_id` to the `docker` group.
- Reboots the host.
4. **Setup directory structure for Docker**:
- Creates necessary directories for Docker and media.
- Sets ownership of the directories.
- Mounts NFS shares.
5. **Deploy configs**:
- Sets up Keycloak realms if the host is a Keycloak host.
6. **Deploy Docker Compose**:
- Copies the Docker Compose file to the target host.
7. **Publish metrics**:
- Copies the `daemon.json` file to `/etc/docker/daemon.json` to enable metrics.
## Handlers
- `Restart docker`: Restarts the Docker service.
- `Restart compose`: Restarts the Docker Compose services.
- `Restart host`: Reboots the host.
## Usage
To use this role, include it in your playbook and set the required variables.
```yaml
- hosts: docker_hosts
  roles:
    - role: docker_host
  vars:
    # Your variables here
```
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details.


@@ -8,4 +8,14 @@
- name: Restart compose
community.docker.docker_compose_v2:
project_src: "{{ docker.directories.compose }}"
state: restarted
state: present
retries: 3
delay: 5
become: true
- name: Restart host
ansible.builtin.reboot:
connect_timeout: 5
reboot_timeout: 600
test_command: whoami
become: true


@@ -0,0 +1,50 @@
---
- name: Check if debian.sources file exists
  ansible.builtin.stat:
    path: /etc/apt/sources.list.d/debian.sources
  register: debian_sources_stat
- name: Replace Components line to include non-free and non-free-firmware
  ansible.builtin.replace:
    path: /etc/apt/sources.list.d/debian.sources
    regexp: "^Components:.*$"
    replace: "Components: main non-free non-free-firmware"
  when: debian_sources_stat.stat.exists
  become: true
- name: Setup VM Packages
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
    update_cache: true
  loop: "{{ docker_host_package_common_dependencies }}"
  become: true
- name: Gather installed package facts
  ansible.builtin.package_facts:
    manager: auto
- name: Filter for specific cloud kernel packages
  ansible.builtin.set_fact:
    cloud_kernel_packages: >-
      {{
        ansible_facts.packages.keys()
        | select('search', 'linux-image')
        | select('search', 'cloud')
        | list
      }}
- name: Use the list to remove the found packages
  ansible.builtin.apt:
    name: "{{ cloud_kernel_packages }}"
    state: absent
    autoremove: true
  when: cloud_kernel_packages | length > 0
  become: true
- name: Restart host
  ansible.builtin.reboot:
    connect_timeout: 5
    reboot_timeout: 600
    test_command: whoami
  become: true


@@ -26,6 +26,7 @@
- curl
- gnupg
- lsb-release
- qemu-guest-agent
become: true
- name: Add Docker apt key.


@@ -5,10 +5,12 @@
state: present
become: true
- name: Append the group docker to "{{ user }}"
- name: Append the group docker to "{{ ansible_user_id }}"
ansible.builtin.user:
name: "{{ user }}"
name: "{{ ansible_user_id }}"
shell: /bin/bash
groups: docker
append: true
become: true
notify:
- Restart host


@@ -5,23 +5,23 @@
state: directory
mode: "0755"
loop:
- /media/docker
- /media/series
- /media/movies
- /media/songs
- "{{ docker.directories.opt }}"
- "{{ docker.directories.local }}"
- "{{ docker.directories.config }}"
- "{{ docker.directories.compose }}"
- /opt/local
become: true
- name: Set ownership to {{ user }}
- name: Set ownership to {{ ansible_user_id }}
ansible.builtin.file:
path: "{{ item }}"
owner: "{{ user }}"
group: "{{ user }}"
owner: "{{ ansible_user_id }}"
group: "{{ ansible_user_id }}"
loop:
- "{{ docker.directories.opt }}"
- /opt/local
- "{{ docker.directories.local }}"
- "{{ docker.directories.config }}"
- "{{ docker.directories.compose }}"
- /media
become: true
@@ -37,4 +37,5 @@
- /media/series
- /media/movies
- /media/songs
- /media/downloads
become: true


@@ -0,0 +1,31 @@
---
- name: Set fact if this host should run Keycloak
  ansible.builtin.set_fact:
    is_keycloak_host: "{{ inventory_hostname in (services | selectattr('name', 'equalto', 'keycloak') | map(attribute='vm') | first) }}"
- name: Create Keycloak directories
  ansible.builtin.file:
    path: "{{ docker.directories.local }}/keycloak/"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    state: directory
    mode: "0755"
  when: is_keycloak_host | bool
  become: true
- name: Setup Keycloak realms
  ansible.builtin.template:
    src: "templates/keycloak/realm.json.j2"
    dest: "{{ docker.directories.local }}/keycloak/{{ keycloak.realm }}-realm.json"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
    backup: true
  when: is_keycloak_host | bool
  loop: "{{ keycloak_config.realms }}"
  loop_control:
    loop_var: keycloak
  notify:
    - Restart docker
    - Restart compose
  become: true


@@ -3,8 +3,8 @@
ansible.builtin.template:
src: "templates/compose.yaml.j2"
dest: "{{ docker.directories.compose }}/compose.yaml"
owner: "{{ user }}"
group: "{{ user }}"
owner: "{{ ansible_user_id }}"
group: "{{ ansible_user_id }}"
mode: "644"
backup: true
notify:


@@ -0,0 +1,21 @@
---
- name: Setup VM
  ansible.builtin.include_tasks: 10_setup.yaml
- name: Install docker
  ansible.builtin.include_tasks: 20_installation.yaml
- name: Setup user and group for docker
  ansible.builtin.include_tasks: 30_user_group_setup.yaml
- name: Setup directory structure for docker
  ansible.builtin.include_tasks: 40_directory_setup.yaml
# - name: Deploy configs
#   ansible.builtin.include_tasks: 50_provision.yaml
- name: Deploy docker compose
  ansible.builtin.include_tasks: 60_deploy_compose.yaml
- name: Publish metrics
  ansible.builtin.include_tasks: 70_export.yaml


@@ -1,18 +0,0 @@
---
- name: Setup VM
  ansible.builtin.include_tasks: setup.yml
- name: Install docker
  ansible.builtin.include_tasks: installation.yml
- name: Setup user and group for docker
  ansible.builtin.include_tasks: user_group_setup.yml
- name: Setup directory structure for docker
  ansible.builtin.include_tasks: directory_setup.yml
- name: Deploy docker compose
  ansible.builtin.include_tasks: deploy_compose.yml
- name: Publish metrics
  ansible.builtin.include_tasks: export.yml


@@ -1,9 +0,0 @@
---
- name: Enable HW accelerate for VM
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - firmware-misc-nonfree
    - nfs-common
  become: true


@@ -1,12 +1,13 @@
services:
{% for service in services %}
{% if inventory_hostname in service.vm %}
{{service.name}}:
{{ service.name }}:
container_name: {{ service.container_name }}
image: {{ service.image }}
restart: {{ service.restart }}
restart: unless-stopped
{% if service.network_mode is not defined %}
hostname: {{service.name}}
hostname: {{ service.name }}
networks:
- net
{% endif %}
@@ -15,11 +16,40 @@ services:
ports:
{% for port in service.ports %}
{% if port.internal != 'proxy_only' %}
- {{port.external}}:{{port.internal}}
- {{ port.external }}:{{ port.internal }}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% if service.ports is defined and service.ports is iterable %}
{% set first_http_port = service.ports | default([]) | selectattr('name', 'defined') | selectattr('name', 'search', 'http') | first %}
{% set chosen_http_port_value = none %}
{% if first_http_port is not none %}
{% if first_http_port.internal is defined and first_http_port.internal == 'proxy_only' %}
{% if first_http_port.external is defined %}
{% set chosen_http_port_value = first_http_port.external %}
{% endif %}
{% else %}
{% set chosen_http_port_value = first_http_port.internal %}
{% endif %}
{% if chosen_http_port_value is defined %}
healthcheck:
{% set healthcheck = 'curl' %}
{% if service.healthcheck is defined %}
{% set healthcheck = service.healthcheck %}
{% endif %}
{% if healthcheck == 'curl' %}
test: ["CMD", "curl", "-f", "--silent", "--show-error", "--connect-timeout", "5", "http://localhost:{{ chosen_http_port_value }}/"]
{% elif healthcheck == 'wget' %}
test: ["CMD-SHELL", "wget --quiet --spider --timeout=5 http://localhost:{{ chosen_http_port_value }}/ || exit 1"]
{% endif %}
interval: 30s
timeout: 10s
retries: 5
start_period: 20s
{% endif %}
{% endif %}
{% endif %}
{% if service.cap_add is defined and service.cap_add is iterable %}
cap_add:
{% for cap in service.cap_add %}
@@ -41,46 +71,88 @@ services:
{% if service.volumes is defined and service.volumes is iterable %}
volumes:
{% for volume in service.volumes %}
- {{volume.external}}:{{volume.internal}}
- {{ volume.external }}:{{ volume.internal }}
{% endfor %}
{% endif %}
{% if service.environment is defined and service.environment is iterable %}
environment:
{% for env in service.environment %}
- {{env}}
- {{ env }}
{% endfor %}
{% endif %}
{% if service.devices is defined and service.devices is iterable %}
devices:
{% for device in service.devices %}
- {{device.external}}:{{device.internal}}
- {{ device.external }}:{{ device.internal }}
{% endfor %}
{% endif %}
{% if service.name == 'paperless' %}
{{service.name}}-broker:
container_name: paperless-broker
image: docker.io/library/redis:7
restart: unless-stopped
networks:
- net
volumes:
- /opt/local/paperless/redis/data:/data
{{service.name}}-postgres:
container_name: paperless-postgres
image: docker.io/library/postgres:15
restart: unless-stopped
networks:
- net
volumes:
- /opt/local/paperless/db/data:/var/lib/postgresql/data
environment:
POSTGRES_DB: paperless
POSTGRES_USER: paperless
POSTGRES_PASSWORD: 5fnhn%u2YWY3paNvMAjdoufYPQ2Hf3Yi
{% if service.command is defined and service.command is iterable %}
command:
{% for command in service.command %}
- {{ command }}
{% endfor %}
{% endif %}
{% if service.sub_service is defined and service.sub_service is iterable %}
{% for sub in service.sub_service %}
{% if sub.name is defined and sub.name == "postgres" %}
{{ service.name }}-postgres:
container_name: {{ service.name }}-postgres
image: docker.io/library/postgres:{{ sub.version }}
restart: unless-stopped
hostname: {{ service.name }}-postgres
networks:
- net
volumes:
- /opt/local/{{ service.name }}/postgres/data:/var/lib/postgresql/data
environment:
POSTGRES_DB: {{ service.name }}
POSTGRES_USER: {{ sub.username }}
POSTGRES_PASSWORD: {{ sub.password }}
{% endif %}
{% if sub.name is defined and sub.name == "redis" %}
{{ service.name }}-redis:
container_name: {{ service.name }}-redis
image: docker.io/library/redis:{{ sub.version }}
restart: unless-stopped
hostname: {{ service.name }}-redis
networks:
- net
volumes:
- /opt/local/{{ service.name }}/redis/data:/data
{% endif %}
{% if sub.name is defined and sub.name == "chrome" %}
{{ service.name }}-chrome:
image: gcr.io/zenika-hub/alpine-chrome:{{ sub.version }}
container_name: {{ service.name }}-chrome
restart: unless-stopped
networks:
- net
command:
- --no-sandbox
- --disable-gpu
- --disable-dev-shm-usage
- --remote-debugging-address=0.0.0.0
- --remote-debugging-port=9222
- --hide-scrollbars
{% endif %}
{% if sub.name is defined and sub.name == "meilisearch" %}
{{ service.name }}-meilisearch:
container_name: {{ service.name }}-meilisearch
image: getmeili/meilisearch:{{ sub.version }}
restart: unless-stopped
hostname: {{ service.name }}-meilisearch
networks:
- net
volumes:
- /opt/local/{{ service.name }}/meilisearch/data:/meili_data
environment:
- MEILI_NO_ANALYTICS=true
- NEXTAUTH_SECRET={{ sub.nextauth_secret }}
- MEILI_MASTER_KEY={{ sub.meili_master_key }}
- OPENAI_API_KEY="{{ sub.openai_key }}"
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}
networks:
@@ -90,6 +162,3 @@ networks:
driver: default
config:
- subnet: 172.16.69.0/24
volumes:
prometheus_data: {}


@@ -0,0 +1,79 @@
{
  "realm": "{{ keycloak.realm }}",
  "enabled": true,
  "displayName": "{{ keycloak.display_name }}",
  "displayNameHtml": "<div class=\"kc-logo-text\">{{ keycloak.display_name }}</div>",
  "bruteForceProtected": true,
  "users": [
{% if keycloak.users is defined and keycloak.users is iterable %}
{% for user in keycloak.users %}
    {
      "username": "{{ user.username }}",
      "enabled": true,
      "credentials": [
        {
          "type": "password",
          "value": "{{ user.password }}",
          "temporary": false
        }
      ],
      "realmRoles": [
{% for realm_role in user.realm_roles %}
        "{{ realm_role }}"{%- if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
      ],
      "clientRoles": {
        "account": [
{% for account in user.client_roles.account %}
          "{{ account }}"{%- if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
        ]
      }
    },
{% endfor %}
{% endif %}
    {
      "username": "{{ keycloak.admin.username }}",
      "enabled": true,
      "credentials": [
        {
          "type": "password",
          "value": "{{ keycloak.admin.password }}",
          "temporary": false
        }
      ],
      "realmRoles": [
{% for realm_role in keycloak.admin.realm_roles %}
        "{{ realm_role }}"{% if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
      ],
      "clientRoles": {
        "realm-management": [
{% for realm_management in keycloak.admin.client_roles.realm_management %}
          "{{ realm_management }}"{%- if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
        ],
        "account": [
{% for account in keycloak.admin.client_roles.account %}
          "{{ account }}"{%- if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
        ]
      }
    }
  ],
  "roles": {
    "realm": [
{% for role in keycloak.roles.realm %}
      {
        "name": "{{ role.name }}",
        "description": "{{ role.name }}"
      }{% if not loop.last %},{% endif %}
{% endfor %}
    ]
  },
  "defaultRoles": [
{% for role in keycloak.roles.default_roles %}
    "{{ role }}"{% if not loop.last %},{% endif %}{{ '' }}
{% endfor %}
  ]
}
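The `keycloak` variable consumed by this realm template would be shaped like the following sketch (hypothetical values; only the key names mirror the template):

```yaml
keycloak:
  realm: homelab
  display_name: Homelab
  users:
    - username: alice
      password: changeme
      realm_roles:
        - offline_access
      client_roles:
        account:
          - view-profile
          - manage-account
  admin:
    username: admin
    password: changeme
    realm_roles:
      - admin
    client_roles:
      realm_management:
        - realm-admin
      account:
        - manage-account
  roles:
    realm:
      - name: admin
      - name: user
    default_roles:
      - user
```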


@@ -0,0 +1,7 @@
docker_host_package_common_dependencies:
- nfs-common
apt_lock_files:
- /var/lib/dpkg/lock
- /var/lib/dpkg/lock-frontend
- /var/cache/apt/archives/lock
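These lock files are presumably polled so plays do not race a concurrent `apt`/`dpkg` run (e.g. unattended upgrades). The consuming task is not part of this diff; a minimal sketch, assuming `lsof` is available on the host, could look like:

```yaml
- name: Wait until no process holds an apt/dpkg lock
  ansible.builtin.command: lsof {{ item }}
  loop: "{{ apt_lock_files }}"
  register: lock_check
  until: lock_check.rc != 0  # lsof exits non-zero when nothing holds the file
  retries: 30
  delay: 10
  failed_when: false
  changed_when: false
  become: true
```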

roles/k3s_agent/README.md

@@ -0,0 +1,39 @@
# K3s Agent Ansible Role
This Ansible role installs and configures a K3s agent on a node.
## Role Variables
- `k3s.loadbalancer.default_port`: The port for the K3s load balancer. Defaults to `6443`.
- `k3s_token`: The token for joining the K3s cluster. This is a required variable.
- The inventory must contain a host named `k3s-loadbalancer`; its `ansible_default_ipv4.address` is used as the address of the K3s load balancer. This is required.
## Tasks
The main tasks are in `tasks/main.yaml` and `tasks/installation.yaml`.
- **`installation.yml`**:
- Installs `qemu-guest-agent`.
- Checks if K3s is already installed.
- Downloads the K3s installation script to `/tmp/k3s_install.sh`.
- Installs K3s as an agent, joining the cluster through the load balancer.
## Handlers
The main handlers are in `handlers/main.yml`.
- **`Restart k3s`**: Restarts the `k3s` service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
---
- hosts: k3s_agents
  roles:
    - role: k3s_agent
      vars:
        k3s_token: "your_k3s_token"
        k3s:
          loadbalancer:
            default_port: 6443
```


@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true


@@ -1,4 +1,12 @@
---
- name: Install qemu-guest-agent
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - qemu-guest-agent
  become: true
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
@@ -11,11 +19,11 @@
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s on the secondary servers
- name: Install K3s on agent
  when: not k3s_status.stat.exists
  ansible.builtin.command: |
    /tmp/k3s_install.sh
  environment:
    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
    K3S_TOKEN: "{{ k3s_token }}"
  become: true


@@ -0,0 +1,3 @@
---
- name: Install k3s agent
  ansible.builtin.include_tasks: installation.yaml


@@ -1,2 +0,0 @@
---
- include_tasks: installation.yml


@@ -0,0 +1,50 @@
# K3s Loadbalancer Ansible Role
This Ansible role configures a load balancer for a K3s cluster using Nginx.
## Role Variables
- `k3s_loadbalancer_nginx_config_path`: The path to the Nginx configuration file. Defaults to `/etc/nginx/nginx.conf`.
- `domain`: The domain name to use for the load balancer. Defaults to `{{ internal_domain }}`.
- `k3s.loadbalancer.default_port`: The default port for the K3s API server. Defaults to `6443`.
- `k3s_server_ips`: A list of IP addresses for the K3s server nodes. This variable is not defined in the role, so you must provide it.
- `netcup_api_key`: Your Netcup API key.
- `netcup_api_password`: Your Netcup API password.
- `netcup_customer_id`: Your Netcup customer ID.
## Tasks
The role performs the following tasks:
- **Installation:**
- Updates the `apt` cache.
- Installs `qemu-guest-agent`.
- Installs `nginx-full`.
- **Configuration:**
- Templates the Nginx configuration file with dynamic upstreams for the K3s servers.
- Enables and starts the Nginx service.
- **DNS Setup:**
- Sets up a DNS A record for the load balancer using the `community.general.netcup_dns` module.
## Handlers
- `Restart nginx`: Restarts the Nginx service when the configuration file is changed.
## Example Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_loadbalancer
  roles:
    - role: k3s_loadbalancer
      vars:
        k3s_server_ips:
          - 192.168.1.10
          - 192.168.1.11
          - 192.168.1.12
        netcup_api_key: "your_api_key"
        netcup_api_password: "your_api_password"
        netcup_customer_id: "your_customer_id"
        internal_domain: "example.com"
```


@@ -2,15 +2,13 @@
- name: Template the nginx config file with dynamic upstreams
  ansible.builtin.template:
    src: templates/nginx.conf.j2
    dest: "{{ nginx_config_path }}"
    dest: "{{ k3s_loadbalancer_nginx_config_path }}"
    owner: root
    group: root
    mode: "0644"
  become: true
  notify:
    - Restart nginx
  vars:
    k3s_server_ips: "{{ k3s.server.ips }}"
- name: Enable nginx
  ansible.builtin.systemd:


@@ -0,0 +1,20 @@
---
- name: Update apt cache
  ansible.builtin.apt:
    update_cache: true
  become: true
- name: Install qemu-guest-agent
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - qemu-guest-agent
  become: true
- name: Install Nginx
  ansible.builtin.apt:
    name:
      - nginx-full
    state: present
  become: true


@@ -0,0 +1,17 @@
---
- name: Installation
  ansible.builtin.include_tasks: installation.yaml
- name: Configure
  ansible.builtin.include_tasks: configuration.yaml
- name: Set up DNS on Netcup
  community.general.netcup_dns:
    api_key: "{{ netcup_api_key }}"
    api_password: "{{ netcup_api_password }}"
    customer_id: "{{ netcup_customer_id }}"
    domain: "{{ domain }}"
    name: "k3s"
    type: "A"
    value: "{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}"
  delegate_to: localhost


@@ -0,0 +1,98 @@
include /etc/nginx/modules-enabled/*.conf;
events {}
stream {
  upstream k3s_servers {
{% for ip in k3s_server_ips %}
    server {{ ip }}:{{ k3s.loadbalancer.default_port }};
{% endfor %}
  }
  server {
    listen {{ k3s.loadbalancer.default_port }};
    proxy_pass k3s_servers;
  }
  upstream etcd_servers {
{% for ip in k3s_server_ips %}
    server {{ ip }}:2379;
{% endfor %}
  }
  server {
    listen 2379;
    proxy_pass etcd_servers;
  }
  upstream dns_servers {
{% for ip in k3s_server_ips %}
    server {{ ip }}:53;
{% endfor %}
  }
  server {
    listen 53 udp;
    proxy_pass dns_servers;
  }
}
# http {
# upstream k3s_servers_http {
# least_conn;
# {% for ip in k3s_server_ips %}
# server {{ ip }}:80;
# {% endfor %}
# }
#
# upstream k3s_servers_https {
# least_conn;
# {% for ip in k3s_server_ips %}
# server {{ ip }}:443;
# {% endfor %}
# }
#
# server {
# listen 80;
#
# location / {
# proxy_pass http://k3s_servers_http;
# proxy_set_header Host $http_host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto http;
# }
# }
#
# server {
# listen 443 ssl;
#
# server_name staging.k3s.seyshiro.de *.staging.k3s.seyshiro.de;
#
# ssl_certificate /etc/nginx/ssl/staging_tls.crt;
# ssl_certificate_key /etc/nginx/ssl/staging_tls.key;
#
# location / {
# proxy_pass https://k3s_servers_https;
# proxy_set_header Host $host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto https;
# }
# }
#
# server {
# listen 443 ssl;
#
# server_name k3s.seyshiro.de *.k3s.seyshiro.de;
#
# ssl_certificate /etc/nginx/ssl/production_tls.crt;
# ssl_certificate_key /etc/nginx/ssl/production_tls.key;
#
# location / {
# proxy_pass https://k3s_servers_https;
# proxy_set_header Host $host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto https;
# }
# }
# }


@@ -0,0 +1,3 @@
k3s_loadbalancer_nginx_config_path: "/etc/nginx/nginx.conf"
domain: "{{ internal_domain }}"


@@ -0,0 +1,49 @@
# K3s Server Ansible Role
This Ansible role installs and configures a K3s server cluster.
## Role Variables
- `k3s_primary_server_ip`: The IP address of the primary K3s server.
- `k3s_server_name`: The server name for the K3s cluster.
- `k3s_cluster_name`: The name for the K3s cluster in the kubeconfig.
- `k3s_user_name`: The user name for the K3s cluster in the kubeconfig.
- `k3s_context_name`: The context name for the K3s cluster in the kubeconfig.
- `k3s_server_token_vault_file`: The path to the Ansible Vault file containing the K3s token. Default is `../vars/group_vars/k3s/secrets_token.yml`.
## Tasks
The main tasks are:
1. **Install dependencies**: Installs `qemu-guest-agent`.
2. **Primary Server Installation**:
- Downloads the K3s installation script.
- Installs the K3s server on the primary node with a TLS SAN.
3. **Pull Token**:
- Retrieves the K3s token from the primary server.
- Stores the token in an Ansible Vault encrypted file.
4. **Secondary Server Installation**:
- Installs K3s on the secondary servers, joining them to the cluster using the token from the vault.
5. **Create Kubeconfig**:
- Slurps the `k3s.yaml` from the primary server.
- Creates a kubeconfig file on the local machine for accessing the cluster.
## Handlers
- `Restart k3s`: Restarts the K3s service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_servers
  roles:
    - role: k3s_server
      vars:
        k3s_primary_server_ip: "192.168.1.100"
        k3s_server_name: "k3s.example.com"
        k3s_cluster_name: "my-k3s-cluster"
        k3s_user_name: "my-k3s-user"
        k3s_context_name: "my-k3s-context"
```
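The `pull_token.yaml` referenced in step 3 is not shown in this diff. Based on the description above, a minimal sketch could look like this (task names and file layout are assumptions, not the actual file):

```yaml
- name: Read the node token from the primary server
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_node_token
  become: true
  when: ansible_default_ipv4.address == k3s_primary_server_ip
- name: Persist the token locally (to be encrypted with ansible-vault afterwards)
  ansible.builtin.copy:
    content: "k3s_token: {{ k3s_node_token.content | b64decode | trim }}"
    dest: "{{ k3s_server_token_vault_file }}"
    mode: "0600"
  delegate_to: localhost
  when: ansible_default_ipv4.address == k3s_primary_server_ip
```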


@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true


@@ -0,0 +1,87 @@
---
- name: Slurp original k3s.yaml from primary K3s server
  ansible.builtin.slurp:
    src: /etc/rancher/k3s/k3s.yaml
  register: original_k3s_kubeconfig_slurp
  become: true
- name: Parse original k3s.yaml content to extract cert data
  ansible.builtin.set_fact:
    original_parsed_k3s_kubeconfig: "{{ original_k3s_kubeconfig_slurp.content | b64decode | from_yaml }}"
  delegate_to: localhost
  run_once: true
- name: Set facts for certificate and key data needed by the template
  ansible.builtin.set_fact:
    k3s_server_ca_data: "{{ original_parsed_k3s_kubeconfig.clusters[0].cluster['certificate-authority-data'] }}"
    k3s_client_cert_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-certificate-data'] }}"
    k3s_client_key_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-key-data'] }}"
  delegate_to: localhost
  run_once: true
- name: Decode and save K3s Server CA certificate
  ansible.builtin.copy:
    content: "{{ k3s_server_ca_data | b64decode }}"
    dest: "/tmp/k3s-ca.crt"
    mode: "0644"
  delegate_to: localhost
  become: false
- name: Decode and save K3s Client certificate
  ansible.builtin.copy:
    content: "{{ k3s_client_cert_data | b64decode }}"
    dest: "/tmp/k3s-client.crt"
    mode: "0644"
  delegate_to: localhost
  become: false
- name: Decode and save K3s Client key
  ansible.builtin.copy:
    content: "{{ k3s_client_key_data | b64decode }}"
    dest: "/tmp/k3s-client.key"
    mode: "0600"
  delegate_to: localhost
  become: false
- name: Add K3s cluster to kubeconfig
  ansible.builtin.command: >
    kubectl config set-cluster "{{ k3s_cluster_name }}"
    --server="https://{{ k3s_server_name }}:6443"
    --certificate-authority=/tmp/k3s-ca.crt
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Add K3s user credentials to kubeconfig
  ansible.builtin.command: >
    kubectl config set-credentials "{{ k3s_user_name }}"
    --client-certificate=/tmp/k3s-client.crt
    --client-key=/tmp/k3s-client.key
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Add K3s context to kubeconfig
  ansible.builtin.command: >
    kubectl config set-context "{{ k3s_context_name }}"
    --cluster="{{ k3s_cluster_name }}"
    --user="{{ k3s_user_name }}"
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Clean up temporary certificate and key files
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - "/tmp/k3s-ca.crt"
    - "/tmp/k3s-client.crt"
    - "/tmp/k3s-client.key"
  delegate_to: localhost
  become: false
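Once these tasks have run, the new context should be usable from the control machine. An optional verification task (not part of the role; the context name matches `k3s_context_name`) might be:

```yaml
- name: Verify the merged kubeconfig can reach the cluster
  ansible.builtin.command: kubectl --context "{{ k3s_context_name }}" get nodes
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
  changed_when: false
```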


@@ -1,58 +0,0 @@
---
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
  register: k3s_status
- name: Download K3s install script to /tmp/
  when: not k3s_status.stat.exists
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s server with node taint and TLS SAN
  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.command: |
    /tmp/k3s_install.sh server \
      --node-taint CriticalAddonsOnly=true:NoExecute \
      --tls-san {{ k3s.loadbalancer.ip }}
  environment:
    K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
  become: true
  async: 300
  poll: 0
  register: k3s_primary_install
- name: Wait for K3s to be installed
  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.async_status:
    jid: "{{ k3s_primary_install.ansible_job_id }}"
  register: k3s_primary_install_status
  until: k3s_primary_install_status.finished
  retries: 60
  delay: 5
  become: true
- name: Get K3s token from the first server
  when: host.ip == k3s.server.ips[0]
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_token
  become: true
- name: Set fact on k3s.server.ips[0]
  when: host.ip == k3s.server.ips[0]
  ansible.builtin.set_fact:
    k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
- name: Install K3s on the secondary servers
  when: (host.ip != k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.command: |
    /tmp/k3s_install.sh server \
      --node-taint CriticalAddonsOnly=true:NoExecute \
      --tls-san {{ k3s.loadbalancer.ip }}
  environment:
    K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
    K3S_TOKEN: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
  become: true
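The deleted `K3S_TOKEN` expression above walks all of `hostvars`, finds the host whose `host.ip` equals the primary server's IP, and reuses the `k3s_token` fact set on that host. A more readable equivalent under the same assumptions about the `host` variable structure would be:

```yaml
environment:
  K3S_TOKEN: >-
    {{ hostvars | dict2items
       | selectattr('value.host.ip', 'defined')
       | selectattr('value.host.ip', 'equalto', k3s.server.ips[0])
       | map(attribute='value.k3s_token')
       | first }}
```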


@@ -0,0 +1,29 @@
---
- name: Install qemu-guest-agent
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
    update_cache: true
  loop:
    - qemu-guest-agent
  become: true
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
  register: k3s_status
- name: Install primary k3s server
  ansible.builtin.include_tasks: primary_installation.yaml
  when: ansible_default_ipv4.address == k3s_primary_server_ip
- name: Get token from primary k3s server
  ansible.builtin.include_tasks: pull_token.yaml
- name: Install secondary k3s servers
  ansible.builtin.include_tasks: secondary_installation.yaml
  when: ansible_default_ipv4.address != k3s_primary_server_ip
- name: Set kubeconfig on localhost
  ansible.builtin.include_tasks: create_kubeconfig.yaml
  when: ansible_default_ipv4.address == k3s_primary_server_ip

Some files were not shown because too many files have changed in this diff.