35 Commits

Author SHA1 Message Date
Tuan-Dat Tran
0a3171b9bc feat(k3s): Added 2 nodes (2/2)
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2026-01-26 23:08:34 +01:00
Tuan-Dat Tran
3068a5a8fb feat(k3s): Added 2 nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2026-01-26 22:42:19 +01:00
Tuan-Dat Tran
ef652fac20 refactor: yml -> yaml
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-11-07 20:44:14 +01:00
Tuan-Dat Tran
22c1b534ab feat(k3s): Add new node and machine
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-26 10:41:11 +01:00
Tuan-Dat Tran
9cb90a8020 feat(caddy): netcup->cf
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-25 09:25:40 +02:00
Tuan-Dat Tran
d9181515bb feat(k3s): Added (temporary) node
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-19 01:33:42 +02:00
Tuan-Dat Tran
c3905ed144 feat(git): Add .gitattributes for ansible-vault git diff
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-19 00:34:51 +02:00
Tuan-Dat Tran
5fb50ab4b2 feat(k3s): Add new node
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-10-07 23:46:40 +02:00
Tuan-Dat Tran
2909d6e16c feat(nfs): Removed unused/removed nfs servers
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
0aed818be5 feat(docker): Removed nodes docker-host10 and docker-host12
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
fbdeec93ce feat(docker): match services that moved to k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
44626101de feat(docker): match services that moved to k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
c1d6f13275 refactor(ansible-lint): fixed ansible-lint warnings
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
282e98e90a fix(proxmox): commented 'non-errors' on script
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-15 23:29:03 +02:00
Tuan-Dat Tran
9573cbfcad feat(k3s): Added 2 nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-07 21:21:33 +02:00
Tuan-Dat Tran
48aec11d8c feat(common): added iscsi for longhorn on k3s
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-07 18:17:33 +02:00
Tuan-Dat Tran
a1da69ac98 feat(proxmox): check_vm as cronjob
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-09-02 19:52:49 +02:00
Tuan-Dat Tran
7aa16f3207 Added blog.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 22:59:01 +02:00
Tuan-Dat Tran
fe3f1749c5 Update README.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 22:51:15 +02:00
Tuan-Dat Tran
6eef96b302 feat(pre-commit): Added linting
2025-07-27 22:46:23 +02:00
Tuan-Dat Tran
2882abfc0b Added README.md for roles
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:40:46 +02:00
Tuan-Dat Tran
2b759cc2ab Update README.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:16:35 +02:00
Tuan-Dat Tran
dbaebaee80 cleanup: services moved to argocd
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 13:58:25 +02:00
Tuan-Dat Tran
89c51aa45c feat(argo): app-of-app argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-25 07:58:41 +02:00
Tuan-Dat Tran
0139850ee3 feat(reverse_proxy): fix caddy letsencrypt
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 21:26:11 +02:00
Tuan-Dat Tran
976cad51e2 refactor(k3s): enhance cluster setup and enable ArgoCD apps
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 07:23:23 +02:00
Tuan-Dat Tran
e1a2248154 feat(kubernetes): add nfs-provisioner
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-15 23:24:52 +02:00
Tuan-Dat Tran
d8fd094379 feat(kubernetes): stable kubernetes with argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-14 22:57:13 +02:00
Tuan-Dat Tran
76000f8123 feat(kubernetes): add initial setup for ArgoCD, Cert-Manager, MetalLB, and Traefik
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 14:25:53 +02:00
Tuan-Dat Tran
4aa939426b refactor(k3s): enhance kubeconfig generation and token management
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 09:33:39 +02:00
Tuan-Dat Tran
9cce71f73b refactor(k3s): manage token securely and install guest agent
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 02:15:01 +02:00
Tuan-Dat Tran
97a5d6c41d refactor(k3s): centralize k3s primary server IP and integrate Netcup DNS
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 01:30:05 +02:00
Tuan-Dat Tran
f1b0cfad2c refactor(k3s): streamline inventory and primary server IP handling
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 00:40:48 +02:00
Tuan-Dat Tran
dac0d88d60 feat(proxmox): add k3s agents and refine VM provisioning
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 23:08:44 +02:00
Tuan-Dat Tran
609e000089 refactor(ansible): centralize inventory and variables in 'vars' directory
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 21:38:53 +02:00
169 changed files with 2068 additions and 1467 deletions

View File

@@ -13,6 +13,8 @@ skip_list:
- fqcn-builtins
- no-handler
- var-naming
- no-changed-when
- risky-shell-pipe
# Enforce certain rules that are not enabled by default.
enable_list:
@@ -25,7 +27,7 @@ enable_list:
- no-changed-when
# Offline mode disables any features that require internet access.
offline: true
offline: false
# Set the desired verbosity level.
verbosity: 1

8
.gitattributes vendored Normal file
View File

@@ -0,0 +1,8 @@
vars/group_vars/proxmox/secrets_vm.yml diff=ansible-vault merge=binary
vars/group_vars/all/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/docker/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/k3s/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/k3s/secrets_token.yml diff=ansible-vault merge=binary
vars/group_vars/kubernetes/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/proxmox/secrets.yml diff=ansible-vault merge=binary
vars/group_vars/proxmox/secrets_vm.yml diff=ansible-vault merge=binary

23
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,23 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- repo: local
hooks:
- id: ansible-galaxy-install
name: Install ansible-galaxy collections
entry: ansible-galaxy collection install -r requirements.yaml
language: system
pass_filenames: false
always_run: true
- repo: https://github.com/ansible/ansible-lint
rev: v6.22.2
hooks:
- id: ansible-lint
files: \.(yaml)$
additional_dependencies:
- ansible-core==2.15.8

139
README.md
View File

@@ -2,86 +2,81 @@
**I do not recommend this project being used for one's own infrastructure, as
this project is heavily attuned to my specific host/network setup**
The Ansible Project to provision fresh Debian VMs for my Proxmox instances.
Some values are hard coded such as the public key both in
[./scripts/debian_seed.sh](./scripts/debian_seed.sh) and [./group_vars/all/vars.yml](./group_vars/all/vars.yml).
## Prerequisites
This Ansible project automates the setup of a K3s Kubernetes cluster on Proxmox VE. It also includes playbooks for configuring Docker hosts, load balancers, and other services.
- [secrets.yml](secrets.yml) in the root directory of this repository.
Skeleton file can be found as [./secrets.yml.skeleton](./secrets.yml.skeleton).
- IP Configuration of hosts like in [./host_vars/\*](./host_vars/*)
- Setup [~/.ssh/config](~/.ssh/config) for the respective hosts used.
- Install `passlib` for your operating system. Needed to hash passwords ad-hoc.
## Repository Structure
## Improvable Variables
The repository is organized into the following main directories:
- `group_vars/k3s/vars.yml`:
- `k3s.server.ips`: Take list of IPs from host_vars `k3s_server*.yml`.
- `k3s_db_connection_string`: Embed this variable in the `k3s.db.`-directory.
Currently causes loop.
- `playbooks/`: Contains the main Ansible playbooks for different setup scenarios.
- `roles/`: Contains the Ansible roles that are used by the playbooks.
- `vars/`: Contains variable files, including group-specific variables.
## Run Playbook
## Playbooks
To run a first playbook and test the setup the following command can be executed.
The following playbooks are available:
- `proxmox.yml`: Provisions VMs and containers on Proxmox VE.
- `k3s-servers.yml`: Sets up the K3s master nodes.
- `k3s-agents.yml`: Sets up the K3s agent nodes.
- `k3s-loadbalancer.yml`: Configures a load balancer for the K3s cluster.
- `k3s-storage.yml`: Configures storage for the K3s cluster.
- `docker.yml`: Sets up Docker hosts and their load balancer.
- `docker-host.yml`: Configures the docker hosts.
- `docker-lb.yml`: Configures a load balancer for Docker services.
- `kubernetes_setup.yml`: A meta-playbook for setting up the entire Kubernetes cluster.
## Roles
The following roles are defined:
- `common`: Common configuration tasks for all nodes.
- `proxmox`: Manages Proxmox VE, including VM and container creation.
- `k3s_server`: Installs and configures K3s master nodes.
- `k3s_agent`: Installs and configures K3s agent nodes.
- `k3s_loadbalancer`: Configures an Nginx-based load balancer for the K3s cluster.
- `k3s_storage`: Configures storage solutions for Kubernetes.
- `docker_host`: Installs and configures Docker.
- `kubernetes_argocd`: Deploys Argo CD to the Kubernetes cluster.
- `node_exporter`: Installs the Prometheus Node Exporter for monitoring.
- `reverse_proxy`: Configures a Caddy-based reverse proxy.
## Usage
1. **Install dependencies:**
```bash
pip install -r requirements.txt
ansible-galaxy install -r requirements.yml
```
2. **Configure variables:**
- Create an inventory file (e.g., `vars/k3s.ini`).
- Adjust variables in `vars/group_vars/` to match your environment.
3. **Run playbooks:**
```bash
# To provision VMs on Proxmox
ansible-playbook -i vars/proxmox.ini playbooks/proxmox.yml
# To set up the K3s cluster
ansible-playbook -i vars/k3s.ini playbooks/kubernetes_setup.yml
```
## Notes
### Vault Git Diff
This repo has a `.gitattributes` that points at the repo's ansible-vault files.
These can be decrypted on the fly for `git diff` by adding this git config in conjunction with the `.gitattributes`:
```sh
ansible-playbook -i production -J k3s-servers.yml
# https://stackoverflow.com/questions/29937195/how-to-diff-ansible-vault-changes
git config --global diff.ansible-vault.textconv "ansible-vault view"
```
This will run the [./k3s-servers.yml](./k3s-servers.yml) playbook and execute
its roles.
## Disclaimer
## After successful k3s installation
To access our Kubernetes cluster from our host machine and work on it via
Flux and the like, we need to manually copy the k3s config from one of our server nodes to our host machine.
Then we install `kubectl` on our host machine, and optionally `kubectx` if we're already
managing other Kubernetes instances.
Next we replace the localhost address inside the config with the IP of our load balancer.
Finally we set the `KUBECONFIG` variable.
```sh
mkdir ~/.kube/
scp k3s-server00:/etc/rancher/k3s/k3s.yaml ~/.kube/config
chown $USER ~/.kube/config
sed -i "s/127.0.0.1/192.168.20.22/" ~/.kube/config
export KUBECONFIG=~/.kube/config
```
Install flux and continue in the flux repository.
## Longhorn Nodes
To create Longhorn nodes from existing Kubernetes nodes we want to increase
their storage capacity. Since we're using VMs for our k3s nodes, we can
resize the root disk of the VMs in the Proxmox GUI.
Then we have to resize the partitions inside the VM so the root partition
uses the newly available space.
With an LVM-based root partition we can do the following:
```sh
# Create a new partition from the free space.
sudo fdisk /dev/sda
# Press n, accept all defaults, then w to write the partition table.
# Create a LVM volume on the new partition
sudo pvcreate /dev/sda3
sudo vgextend k3s-vg /dev/sda3
# Use the newly available storage in the root volume
sudo lvresize -l +100%FREE -r /dev/k3s-vg/root
```
## Cloud Init VMs
```sh
# On Hypervisor Host
qm resize <vmid> scsi0 +32G
# On VM
sudo fdisk -l /dev/sda # To check
echo 1 | sudo tee /sys/class/block/sda/device/rescan
sudo fdisk -l /dev/sda # To check
# sudo apt-get install cloud-guest-utils
sudo growpart /dev/sda 1
```
This project is highly customized for the author's specific environment. Using it without modification is not recommended.

View File

@@ -6,7 +6,7 @@ interpreter_python=python3
roles_path=./roles
# (pathlist) Comma separated list of Ansible inventory sources
inventory=./inventory
inventory=./vars/
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
# If executable, it will be run and the resulting stdout will be used as the password.
@@ -14,7 +14,7 @@ vault_password_file=/media/veracrypt1/scripts/ansible_vault.sh
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
# This affects vars_files, include_vars, inventory and vars plugins among others.
yaml_valid_extensions=.yml
yaml_valid_extensions=.yaml
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host
host_key_checking=False
@@ -36,3 +36,6 @@ skip=dark gray
[tags]
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=
[inventory]
ignore_extensions={{(REJECT_EXTS + ('.orig', '.cfg', '.retry', '.bak'))}}

View File

@@ -688,4 +688,3 @@
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=

69
blog.md Normal file
View File

@@ -0,0 +1,69 @@
---
title: "Automating My Homelab: From Bare Metal to Kubernetes with Ansible"
date: 2025-07-27
author: "TuDatTr"
tags: ["Ansible", "Proxmox", "Kubernetes", "K3s", "IaC", "Homelab"]
---
## The Homelab: Repeatable, Automated, and Documented
For many tech enthusiasts, a homelab is a playground for learning, experimenting, and self-hosting services. But as the complexity grows, so does the management overhead. Manually setting up virtual machines, configuring networks, and deploying applications becomes a tedious and error-prone process. This led me to build my homelab as Infrastructure as Code (IaC) with Ansible.
This blog post walks you through my Ansible project, which automates the entire lifecycle of my homelab—from provisioning VMs on Proxmox to deploying a production-ready K3s Kubernetes cluster.
## Why Ansible?
When I decided to automate my infrastructure, I considered several tools. I chose Ansible for its simplicity, agentless architecture, and gentle learning curve. Writing playbooks in YAML felt declarative and intuitive, and the vast collection of community-supported modules meant I wouldn't have to reinvent the wheel.
## The Architecture: A Multi-Layered Approach
My Ansible project is designed to be modular and scalable, with a clear separation of concerns. It's built around a collection of roles, each responsible for a specific component of the infrastructure.
### Layer 1: Proxmox Provisioning
The foundation of my homelab is Proxmox VE. The `proxmox` role is the first step in the automation pipeline. It handles:
- **VM and Container Creation:** Using a simple YAML definition in my `vars` files (sketched after this list), I can specify the number of VMs and containers to create, their resources (CPU, memory, disk), and their base operating system images.
- **Cloud-Init Integration:** For VMs, I leverage Cloud-Init to perform initial setup, such as setting the hostname, creating users, and injecting SSH keys for Ansible to connect to.
- **Hardware Passthrough:** The role also configures hardware passthrough for devices like Intel Quick Sync for video transcoding in my media server.
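To make this concrete, here is a sketch of one VM entry from my `vars` files (values illustrative):

```yaml
vms:
  - name: "docker-host10"        # VM hostname
    node: "lulu"                 # Proxmox node to create the VM on
    vmid: 410
    cores: 2
    memory: 4096                 # in MiB
    net:
      net0: "virtio,bridge=vmbr0,firewall=1"
    boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
    ciuser: "{{ user }}"         # Cloud-Init user
    sshkeys: "{{ pubkey }}"      # SSH key injected via Cloud-Init
    disk_size: 128               # in GB
```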
### Layer 2: The K3s Kubernetes Cluster
With the base VMs ready, the next step is to build the Kubernetes cluster. I chose K3s for its lightweight footprint and ease of installation. The setup is divided into several roles:
- `k3s_server`: This role bootstraps the first master node and then adds additional master nodes to create a highly available control plane.
- `k3s_agent`: This role joins the worker nodes to the cluster.
- `k3s_loadbalancer`: A dedicated VM running Nginx is set up to act as a load balancer for the K3s API server, ensuring a stable endpoint for `kubectl` and other clients.
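All of these roles read the cluster topology from a single set of group variables. A trimmed-down sketch (IPs illustrative):

```yaml
k3s:
  net: "192.168.20.0/24"
  server:
    ips:                    # control-plane nodes
      - 192.168.20.21
      - 192.168.20.24
  loadbalancer:
    ip: 192.168.20.22       # stable API endpoint fronted by Nginx
    default_port: 6443
  agent:
    ips:                    # worker nodes
      - 192.168.20.25
      - 192.168.20.26
```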
### Layer 3: Applications and Services
Once the Kubernetes cluster is up and running, it's time to deploy applications. My project includes roles for:
- `docker_host`: For services that are better suited to run in a traditional Docker environment, this role sets up and configures Docker hosts.
- `kubernetes_argocd`: I use Argo CD for GitOps-based continuous delivery. This role deploys Argo CD to the cluster and configures it to sync with my application repositories (a minimal manifest is sketched below).
- `reverse_proxy`: Caddy is my reverse proxy of choice, and this role automates its installation and configuration, including obtaining SSL certificates from Let's Encrypt.
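The app-of-apps pattern mentioned above boils down to a single `Application` that points at a directory of further `Application` manifests. A minimal sketch, with a hypothetical repository URL and path:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: app-of-apps
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://codeberg.org/tudattr/argocd-apps.git # hypothetical repo
    targetRevision: HEAD
    path: apps          # directory containing child Application manifests
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true       # remove resources that disappear from git
      selfHeal: true    # revert manual drift
```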
## Putting It All Together: The Power of Playbooks
The playbooks in the `playbooks/` directory tie everything together. For example, the `kubernetes_setup.yml` playbook runs all the necessary roles in the correct order to bring up the entire Kubernetes cluster from scratch.
```yaml
# playbooks/kubernetes_setup.yml
---
- name: Set up Kubernetes Cluster
hosts: all
gather_facts: true
roles:
- role: k3s_server
- role: k3s_agent
- role: k3s_loadbalancer
- role: kubernetes_argocd
```
## Final Thoughts and Future Plans
This Ansible project has transformed my homelab from a collection of manually configured machines into a fully automated and reproducible environment. I can now tear down and rebuild my entire infrastructure with a single command, which gives me the confidence to experiment without fear of breaking things.
While the project is highly tailored to my specific needs, I hope this overview provides some inspiration for your own automation journey. The principles of IaC and the power of tools like Ansible can be applied to any environment, big or small.
What's next? I plan to explore more advanced Kubernetes concepts, such as Cilium for networking and policy, and integrate more of my self-hosted services into the GitOps workflow with Argo CD. The homelab is never truly "finished," and that's what makes it so much fun.

View File

@@ -1,14 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
65646664663537386235383334613536393336623332363437376337323235636335363165366632
3433623633393731373932306433643663333133393734370a353261353164353335356264643234
65376132336534306465376435303764616136646633303166336136373263346436353235343065
6238353863333239330a303131623262353563323864323536313036356237653936326361366565
62616566396266363535653062636537383061363438303138333237643939323162336465326363
64323830393839386233303634326562386537373766646461376238663963376463623130303363
65366638666132393538336361663639303831333232336632616338396539353565663239373265
38323036343733303131383439323738623263383736303935636339303564343662633437626233
33303564373963646465306137346161656166366266663766356362636362643430393232646635
38363764386538613166306464336532623464343565396431643738353434313838633763663861
35616365383831643434316436313035366131663131373064663464393031623132366137303333
62333561373465323664303539353966663763613365373633373761343966656166363265313134
6163

View File

@@ -1,20 +0,0 @@
#
# Essential
#
root: root
user: tudattr
timezone: Europe/Berlin
puid: "1000"
pgid: "1000"
pk_path: "/media/veracrypt1/genesis"
pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqc9fnzfCz8fQDFzla+D8PBhvaMmFu2aF+TYkkZRxl9 tuan@genesis-2022-01-20"
public_domain: tudattr.dev
internal_domain: seyshiro.de
#
# Packages
#
arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"

View File

@@ -1,502 +0,0 @@
docker:
url: "https://download.docker.com/linux"
apt_release_channel: "stable"
directories:
local: "/opt/local/"
config: "/opt/docker/config/"
compose: "/opt/docker/compose/"
services:
- name: status
vm:
- docker-host12
container_name: kuma
image: louislam/uptime-kuma:1.23.16
volumes:
- name: "Data"
internal: /app/data
external: "{{ docker.directories.local }}/kuma/"
ports:
- name: "http"
internal: 3001
external: "{{ services_external_http.kuma }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: plex
vm:
- docker-host10
container_name: plex
image: lscr.io/linuxserver/plex:1.41.5
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/plex/config/"
- name: "TV Series"
internal: /tv:ro
external: /media/series
- name: "Movies"
internal: /movies:ro
external: /media/movies
- name: "Music"
internal: /music:ro
external: /media/songs
devices:
- name: "Graphics Card"
internal: /dev/dri
external: /dev/dri
ports:
- name: "http"
internal: 32400
external: "{{ services_external_http.plex }}"
- name: ""
internal: 1900
external: 1900
- name: ""
internal: 3005
external: 3005
- name: ""
internal: 5353
external: 5353
- name: ""
internal: 32410
external: 32410
- name: ""
internal: 8324
external: 8324
- name: ""
internal: 32412
external: 32412
- name: ""
internal: 32469
external: 32469
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- VERSION=docker
- name: jellyfin
vm:
- docker-host01
container_name: jellyfin
image: jellyfin/jellyfin:10.10
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/jellyfin/config"
- name: "Cache"
internal: /cache
external: "{{ docker.directories.config }}/jellyfin/cache"
- name: "Tv Series"
internal: /tv:ro
external: /media/series
- name: "Music"
internal: /movies:ro
external: /media/movies
- name: "Music"
internal: /music:ro
external: /media/songs
devices:
- name: "Graphics Card"
internal: /dev/dri
external: /dev/dri
ports:
- name: "http"
internal: 8096
external: "{{ services_external_http.jellyfin }}"
environment:
- name: hass
vm:
- docker-host01
container_name: homeassistant
image: "ghcr.io/home-assistant/home-assistant:stable"
privileged: true
volumes:
- name: "Configuration"
internal: /config/
external: "{{ docker.directories.local }}/home-assistant/config/"
- name: "Local Time"
internal: /etc/localtime:ro
external: /etc/localtime
ports:
- name: "http"
internal: 8123
external: "{{ services_external_http.hass }}"
- name: ""
internal: 4357
external: 4357
- name: ""
internal: 5683
external: 5683
- name: ""
internal: 5683
external: 5683
- name: ddns
vm:
- docker-host12
container_name: ddns-updater
image: qmcgaw/ddns-updater:2
volumes:
- name: "Configuration"
internal: /updater/data/
external: "{{ docker.directories.local }}/ddns-updater/data/"
ports:
- name: "http"
internal: 8000
external: "{{ services_external_http.ddns }}"
- name: sonarr
vm:
- docker-host12
container_name: sonarr
image: linuxserver/sonarr:4.0.14
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/sonarr/config"
- name: "Tv Series"
internal: /tv
external: /media/series
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/sonarr
ports:
- name: "http"
internal: 8989
external: "{{ services_external_http.sonarr }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: radarr
vm:
- docker-host12
container_name: radarr
image: linuxserver/radarr:5.21.1
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/radarr/config"
- name: "Movies"
internal: /movies
external: /media/movies
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/radarr
ports:
- name: "http"
internal: 7878
external: "{{ services_external_http.radarr }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: lidarr
vm:
- docker-host12
container_name: lidarr
image: linuxserver/lidarr:2.10.3
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/lidarr/config"
- name: "Music"
internal: /music
external: /media/songs
- name: "Torrent Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads/lidarr
ports:
- name: "http"
internal: 8686
external: "{{ services_external_http.lidarr }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: prowlarr
vm:
- docker-host12
container_name: prowlarr
image: linuxserver/prowlarr:1.32.2
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/prowlarr/config"
ports:
- name: "http"
internal: 9696
external: "{{ services_external_http.prowlarr }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- name: paperless
vm:
- docker-host12
container_name: paperless
image: ghcr.io/paperless-ngx/paperless-ngx:2.14
depends_on:
- paperless-postgres
- paperless-redis
volumes:
- name: "Configuration"
internal: /usr/src/paperless/data
external: "{{ docker.directories.local }}/paperless/data/data"
- name: "Media"
internal: /usr/src/paperless/media
external: "{{ docker.directories.local }}/paperless/data/media"
- name: "Document Export"
internal: /usr/src/paperless/export
external: "{{ docker.directories.local }}/paperless/data/export"
- name: "Document Consume"
internal: /usr/src/paperless/consume
external: "{{ docker.directories.local }}/paperless/data/consume"
environment:
- "PAPERLESS_REDIS=redis://paperless-redis:6379"
- "PAPERLESS_DBHOST=paperless-postgres"
- "PAPERLESS_DBUSER=paperless"
- "PAPERLESS_DBPASS={{ vault.docker.paperless.dbpass }}"
- "USERMAP_UID=1000"
- "USERMAP_GID=1000"
- "PAPERLESS_URL=https://paperless.{{ domain }}"
- "PAPERLESS_TIME_ZONE=Europe/Berlin"
- "PAPERLESS_OCR_LANGUAGE=deu"
ports:
- name: "http"
internal: 8000
external: "{{ services_external_http.paperless }}"
sub_service:
- name: postgres
version: 15
username: paperless
password: "{{ vault.docker.paperless.dbpass }}"
- name: redis
version: 7
- name: pdf
vm:
- docker-host12
container_name: stirling
image: frooodle/s-pdf:0.45.0
ports:
- name: "http"
internal: 8080
external: "{{ services_external_http.pdf }}"
- name: git
vm:
- docker-host01
container_name: gitea
image: gitea/gitea:1.23-rootless
volumes:
- name: "Configuration"
internal: /etc/gitea
external: "{{ docker.directories.local }}/gitea/config"
- name: "Data"
internal: /var/lib/gitea
external: "{{ docker.directories.local }}/gitea/data"
- name: "Time Zone"
internal: /etc/timezone:ro
external: /etc/timezone
- name: "Local Time"
internal: /etc/localtime:ro
external: /etc/localtime
ports:
- name: "http"
internal: 3000
external: "{{ services_external_http.git }}"
- name: "ssh"
internal: 2222
external: 2222
environment:
- USER_UID=1000
- USER_GID=1000
- name: changedetection
vm:
- docker-host12
container_name: changedetection
image: dgtlmoon/changedetection.io:0.49
healthcheck: curl
volumes:
- name: "Data"
internal: /datastore
external: "{{ docker.directories.local }}/changedetection/data/"
ports:
- name: "http"
internal: 5000
external: "{{ services_external_http.changedetection }}"
- name: gluetun
vm:
- docker-host12
container_name: gluetun
image: qmcgaw/gluetun:v3.40
cap_add:
- NET_ADMIN
devices:
- name: "Tunnel"
internal: /dev/net/tun
external: /dev/net/tun
volumes:
- name: "Configuration"
internal: /gluetun
external: "{{ docker.directories.local }}/gluetun/config"
ports:
- name: "Qbit Client"
internal: 8082
external: 8082
- name: "Torrentleech Client"
internal: 8083
external: 8083
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- VPN_SERVICE_PROVIDER=protonvpn
- UPDATER_VPN_SERVICE_PROVIDERS=protonvpn
- UPDATER_PERIOD=24h
- "SERVER_COUNTRIES={{ vault.docker.proton.country }}"
- "OPENVPN_USER={{ vault.docker.proton.openvpn_user }}"
- "OPENVPN_PASSWORD={{ vault.docker.proton.openvpn_password }}"
- name: torrentleech
vm:
- docker-host12
container_name: torrentleech
image: qbittorrentofficial/qbittorrent-nox
depends_on:
- gluetun
network_mode: "container:gluetun"
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/torrentleech/config"
- name: "Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads
ports:
- name: "http"
internal: proxy_only
external: 8083
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- QBT_EULA="accept"
- QBT_WEBUI_PORT="8083"
- name: qbit
vm:
- docker-host12
container_name: qbit
image: qbittorrentofficial/qbittorrent-nox:5.0.4-1
depends_on:
- gluetun
network_mode: "container:gluetun"
volumes:
- name: "Configuration"
internal: /config
external: "{{ docker.directories.local }}/qbit/config"
- name: "Downloads"
internal: /downloads
external: /media/docker/data/arr_downloads
ports:
- name: "http"
internal: proxy_only
external: 8082
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Berlin
- QBT_EULA="accept"
- QBT_WEBUI_PORT="8082"
- name: cadvisor
vm:
- docker-host12
- docker-host10
- docker-host01
container_name: cadvisor
image: gcr.io/cadvisor/cadvisor:v0.52.1
ports:
- name: ""
internal: 8080
external: 8081
volumes:
- name: "Root"
internal: /rootfs:ro
external: /
- name: "Run"
internal: /var/run:rw
external: /var/run
- name: "System"
internal: /sys:ro
external: /sys
- name: "Docker"
internal: /var/lib/docker:ro
external: /var/lib/docker
- name: karakeep
vm:
- docker-host01
container_name: karakeep
image: ghcr.io/karakeep-app/karakeep:0.23.2
ports:
- name: "http"
internal: 3000
external: "{{ services_external_http.karakeep }}"
volumes:
- name: "Data"
internal: /data
external: "{{ docker.directories.local }}/karakeep/config"
environment:
- MEILI_ADDR=http://karakeep-meilisearch:7700
- BROWSER_WEB_URL=http://karakeep-chrome:9222
- NEXTAUTH_SECRET={{ vault.docker.karakeep.nextauth_secret }}
- MEILI_MASTER_KEY={{ vault.docker.karakeep.meili_master_key }}
- NEXTAUTH_URL=https://karakeep.tudattr.dev/
- OPENAI_API_KEY={{ vault.docker.karakeep.openai_key }}
- DATA_DIR=/data
- DISABLE_SIGNUPS=true
sub_service:
- name: meilisearch
version: v1.11.1
nextauth_secret: "{{ vault.docker.karakeep.nextauth_secret }}"
meili_master_key: "{{ vault.docker.karakeep.meili_master_key }}"
openai_key: "{{ vault.docker.karakeep.openai_key }}"
- name: chrome
version: 123
- name: keycloak
vm:
- docker-host01
container_name: keycloak
image: quay.io/keycloak/keycloak:26.2
depends_on:
- keycloak-postgres
ports:
- name: "http"
internal: 8080
external: "{{ services_external_http.keycloak }}"
volumes:
- name: "config"
internal: /opt/keycloak/data/import/homelab-realm.json
external: "{{ docker.directories.local }}/keycloak/homelab-realm.json"
- name: "config"
internal: /opt/keycloak/data/import/master-realm.json
external: "{{ docker.directories.local }}/keycloak/master-realm.json"
command:
- "start"
- "--import-realm"
environment:
- KC_DB=postgres
- KC_DB_URL=jdbc:postgresql://keycloak-postgres:5432/keycloak
- KC_DB_USERNAME={{ keycloak_config.database.username }}
- KC_DB_PASSWORD={{ keycloak_config.database.password }}
- KC_HOSTNAME=keycloak.{{ internal_domain }}
- KC_HTTP_ENABLED=true
- KC_HTTP_RELATIVE_PATH=/
- KC_PROXY=edge
- KC_PROXY_HEADERS=xforwarded
- KC_HOSTNAME_URL=https://keycloak.{{ internal_domain }}
- KC_HOSTNAME_ADMIN_URL=https://keycloak.{{ internal_domain }}
- KC_BOOTSTRAP_ADMIN_USERNAME=serviceadmin-{{ keycloak_admin_hash }}
- KC_BOOTSTRAP_ADMIN_PASSWORD={{ vault.docker.keycloak.admin.password }}
sub_service:
- name: postgres
version: 17
username: "{{ keycloak_config.database.username }}"
password: "{{ keycloak_config.database.password }}"

View File

@@ -1,65 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
62353938306631616432613936343031386266643837393733336533306532643332383761336462
3566663762343161373266353236323532666562383031310a663661316264313737633732313166
35336535353964646238393563333339646634346532633130633364343864363565353461616663
6336343138623762320a366132383634383231316130643535313465356238343534656237626362
38373439663730353739386636313865336262363864323633343839636434353261313432386135
33343438663564323465373435613765306538633339303362656163636237643661623637376135
65346465303530663161356666333062326536313135363536313237616564363838326339646162
62323066626431376231386432333766366434326239303734353036396433333662333733373830
66336433643032636166306332323063393333363734326333363936303033396336626135363832
30636136656235376163613033616563663663633161643937666537333066343135326138643663
64646638393364376466306438383337383231303637313366333638393939373739646338353036
62303162383362393830316163303236336236363531333665353163373530323063313164656562
33383561613530346561336166653536393137346630333262633738383838383338643761666463
61303239636631646634373266303930343437636464326132316534616261376137396233653265
39383137666533613739363764643162663361333465386332383964343534646537343065343833
66643938623734643537313866316335396135613239393262613562356332663861646261373630
34373939663239646534396638636265303438386239636439663635313665613634373832313237
62306366633139333937646534393765663130396466346161376235656461346638323063353662
64386466373433376133343266396537656435333831356531346531653262396330346238623431
61303466366161336664333239663066643232623532643933373661663266366639646139666636
62393532643535656566643862353337333533633861396164643766316637393638363662653863
32643566333961663065383636383436666137356237643634326464636463303530306466616635
36366365636337366335333630306237356366306535613464636463373063653861623464323764
62336139653361376239303632326431643231346137333835356362333962613039643332373166
32316234376431376136666161383039633035356636626664376137323630323966646161313664
38623463376366623430663363663662303166636165646138363631643261376137336636636663
61656631393963353066333930303932653730613431366131616233363662316139663038336538
36383532316162356235373566313832323131326466363734613438323233353330613561383435
39623435366236306431636232323838386462346464653561653638346338613833613133373133
38626364643738373938336237323836646532356539643933333730353333626138646239633234
66316563306230636139323335323665646462343861393366666462623966376431393438376134
37376339356430316235633337376462666439643430303062656538386630613763623433646133
65663530626533663266623861326431633137363466346634656634623166623331306636616666
31643761343632336531356566636165363737646639326533386333646434393736643934643064
39393039346639353439653766326138613164343030306436383461663636346534346365333265
66653535623962653762633934646131653334363232636634303130306632383263373161363462
35323133616665366238353535346561323834353634613730613439643536376337353234313337
61623264616433336532383533376631396438313739616462323064613665396638333438306336
34633338366235336131303462346665663464376334353431343363336662356335356562366532
64366461623864633238666339346138663931363331613463333762336230313530613235303766
34313064383461623230383730623731323533326663613565646436303230653264323061616536
38636162356164656432626433373864326264623063343662323563366133363336313739326137
31326164646364613865396534626533616366613565303032636637366435326336396464313232
66393538393862616466313833326666316231393130666238636130613339663664393434613732
65383363323138343335393636626138303561613532306131666334346631336333336639626466
38343337346566346334383934306433366239666662346463666166643338613264636563653434
36306338313363636665333763323135386165313939336432636339613432323736326635303162
36656234656563376633373333633430313430333834623964653530626539333265363563376239
33633430396338663063383338333732356532313435613737393465323431393035356136306166
62633035653731636361396235613162643332393233326434353831613731373333326464326234
36366166633437356336616166306164343636623962623136653861333866393039653939333037
31343261663534356530373233336165326134613961616331316531313435386464396438363838
31353935666566326630373336376438326366623537356536653564303066343837653030373962
30393363336232646662663166326166386636356466616165376435623031666664373664623330
31613030616162303732353738386434666566386138373238363732303138316533356435656662
38636136353134303166636438663036363834663639613464376662666364386635333138353035
39363236653336386332313930306663366130303836333664363335386331636431623036336535
32366339386539306364343065323263366563643663623731643866346232653838333561336331
36363030383263666137393035356331323038316239356637303665653164363739313664396235
32366231613532323865623861636263383731303164366333303636356633323161653635393830
38616139656264393932353332303264393038396663663236353838343432373965663561333531
36363432323362643634623030356539396562633238653732313739616464643436666130633364
37383764623938626332316630636630343236663338323661333933333730333630353061653061
62656233653439353438

View File

@@ -1,8 +0,0 @@
caddy:
admin_email: me+acme@tudattr.dev
domain: "{{ internal_domain }}"
netcup_api_key: "{{ vault.netcup.api_key }}"
netcup_api_password: "{{ vault.netcup.api_password }}"
netcup_customer_id: "{{ vault.netcup.customer_id }}"

View File

@@ -1,18 +0,0 @@
k3s:
net: "192.168.20.0/24"
server:
ips:
- 192.168.20.21
- 192.168.20.24
- 192.168.20.30
loadbalancer:
ip: 192.168.20.22
default_port: 6443
db:
ip: 192.168.20.23
default_port: "5432"
agent:
ips:
- 192.168.20.25
- 192.168.20.26
- 192.168.20.27

View File

@@ -1,15 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
35333866323538343132373761316430616539643436646637633131366232346566656438303438
3539333661363964633834613161626134323533653737650a613832323436663739663162303066
31333130646631306539356233346632636132346539343734393065353033613865363466646632
6565343937666530330a326130393934326435643837323631653862313232363466643534306131
62376132383137336230366538326364663362346137613930633161663834393835623935373164
65623564633765653137623361376130623363613263313835366464313039613532323661363461
37366438616566643537656639316665363339633737363539636364316335663639303364663366
62653734343364663830633534643931656439313763366138323663373464303137323864313637
65316135343464393031343166366338323839326631623533343931353833643232643339386231
38623735386465383964653663346631376531376261353933346661666131353533633331353437
63336366623333653732306130316264393865633338653238303861646535343837396232366134
63343037636361323239376436326431623165326366383561323832323730636532623039383734
66663139656262643038303435346666323762343661336234663131343531636161636536646465
6530333864323262363536393562346362306161653162346132

View File

@@ -1,20 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
35616266333838306161336339353538306634373132626132643732303066303163343630333630
6338393762616262303038373334663230383464643836370a656538393531393134616463643239
36383330653339393362353838313639333432643535643833396535653632376336613130646663
3532646538363137630a363731613235653935316531616430346264643837306434386333373033
33663135653931373963343734366562386263663939383536663439383537333264666233343233
62626162666538333435396638393338393734656131303065616534613733353335643939333765
38326237343337363064666530303664326563633262313432343030336266373437353837346461
63333363626164316638346635666537613963383537313965373638303732353365623166363736
31633239646262613539646637663664313337353465636366313338303439613638653530656631
62396536316561623736633631623336313537646138383431633538303163303261323864383538
38626338373332653561343036323236383337343037356366626230646432646538373836303063
61346339376561626630653562346439306561643664666437386562356535303264646338326261
33636536663161366635666264663539653037306339316233643662643134396636636162656333
36666139376263646130333263653335333165356462363434373439313330383331356138333431
31633362343639376436616339656561316433346532346533336261383433366366396261366134
35363264373335616165643665653466613434386630373232386261393464376361313131386462
33333531336334386562356338623233313862316232356562373561633364363263306465333439
37386631626538636365376464653837333662363361653237366161316431653266643238346336
363863376530613036313866323965326638

View File

@@ -1,3 +0,0 @@
proxmox_api_user: root
proxmox_api_host: 192.168.20.12
proxmox_api_password: "{{ vault.pve.aya01.root.sudo }}"

View File

@@ -1,80 +0,0 @@
vms:
- name: "docker-host10"
node: "lulu"
vmid: 410
cores: 2
memory: 4096 # in MiB
net:
net0: "virtio,bridge=vmbr0,firewall=1"
boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
ciuser: "{{ user }}"
sshkeys: "{{ pubkey }}"
disk_size: 128 # in Gb
hostpci:
hostpci0: "0000:00:02.0"
- name: "docker-host11"
node: "lulu"
vmid: 411
cores: 2
memory: 4096 # in MiB
net:
net0: "virtio,bridge=vmbr0,firewall=1"
boot_image: "{{ proxmox_cloud_init_images.ubuntu.name }}"
ciuser: "{{ user }}"
sshkeys: "{{ pubkey }}"
disk_size: 128 # in Gb
- name: "docker-host12"
node: "naruto01"
vmid: 412
cores: 4
memory: 8192
net:
net0: "virtio,bridge=vmbr0,firewall=1"
boot_image: "{{ proxmox_cloud_init_images.ubuntu.name }}"
ciuser: "{{ user }}"
sshkeys: "{{ pubkey }}"
disk_size: 128 # in Gb
- name: "k3s-server10"
node: "naruto01"
vmid: 110
cores: 2
memory: 4096 # in MiB
net:
net0: "virtio,bridge=vmbr0,firewall=1"
boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
ciuser: "{{ user }}"
sshkeys: "{{ pubkey }}"
disk_size: 64 # in Gb
# - name: "k3s-agent10"
# node: "naruto01"
# vmid: 210
# cores: 2
# memory: 4096 # in MiB
# net:
# net0: "virtio,bridge=vmbr0,firewall=1"
# boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
# ciuser: "{{ user }}"
# sshkeys: "{{ pubkey }}"
# disk_size: 50 # in Gb
# - name: "k3s-agent11"
# node: "lulu"
# vmid: 211
# cores: 2
# memory: 4096 # in MiB
# net:
# net0: "virtio,bridge=vmbr0,firewall=1"
# boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
# ciuser: "{{ user }}"
# sshkeys: "{{ pubkey }}"
# disk_size: 128 # in Gb
# - name: "k3s-agent12"
# node: "inko"
# vmid: 212
# cores: 2
# memory: 4096 # in MiB
# net:
# net0: "virtio,bridge=vmbr0,firewall=1"
# boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
# ciuser: "{{ user }}"
# sshkeys: "{{ pubkey }}"
# disk_size: 128 # in Gb

View File

@@ -1,13 +0,0 @@
[docker_host]
docker-host01 ansible_become_pass: "{{ vault.docker.host01.sudo }}"
docker-host10
docker-host12
[docker_lb]
docker-lb ansible_become_pass: "{{ vault.docker.lb.sudo }}"
[docker]
[docker:children]
docker_host
docker_lb

View File

@@ -1,21 +0,0 @@
[k3s]
[k3s:children]
k3s_server
k3s_agent
k3s_storage
k3s_storage
k3s_loadbalancer
[k3s_server]
k3s-server10
[k3s_agent]
k3s-agent[10:12]
[k3s_storage]
k3s-longhorn[10:12]
[k3s_loadbalancer]
k3s-loadbalancer

View File

@@ -2,12 +2,10 @@
- name: Set up Servers
hosts: docker_host
gather_facts: true
vars_files:
- secrets.yml
roles:
- role: common
tags:
- common
# - role: common
# tags:
# - common
- role: docker_host
tags:
- docker_host

View File

@@ -2,8 +2,6 @@
- name: Set up reverse proxy for docker
hosts: docker
gather_facts: true
vars_files:
- secrets.yml
roles:
- role: common
tags:

5
playbooks/docker.yaml Normal file
View File

@@ -0,0 +1,5 @@
---
- name: Setup Docker Hosts
ansible.builtin.import_playbook: docker-host.yaml
- name: Setup Docker load balancer
ansible.builtin.import_playbook: docker-lb.yaml

View File

@@ -1,5 +0,0 @@
---
- name: Setup Docker Hosts
ansible.builtin.import_playbook: docker-host.yml
- name: Setup Docker load balancer
ansible.builtin.import_playbook: docker-lb.yml

16
playbooks/k3s-agents.yaml Normal file
View File

@@ -0,0 +1,16 @@
- name: Set up Agents
hosts: k3s
gather_facts: true
roles:
- role: common
when: inventory_hostname in groups["k3s_agent"]
tags:
- common
- role: k3s_agent
when: inventory_hostname in groups["k3s_agent"]
tags:
- k3s_agent
# - role: node_exporter
# when: inventory_hostname in groups["k3s_agent"]
# tags:
# - node_exporter

View File

@@ -1,31 +0,0 @@
- name: Set up Agents
hosts: k3s_nodes
gather_facts: yes
vars_files:
- secrets.yml
pre_tasks:
- name: Get K3s token from the first server
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
slurp:
src: /var/lib/rancher/k3s/server/node-token
register: k3s_token
become: true
- name: Set fact on k3s.server.ips[0]
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
roles:
- role: common
when: inventory_hostname in groups["k3s_agent"]
tags:
- common
- role: k3s_agent
when: inventory_hostname in groups["k3s_agent"]
k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
tags:
- k3s_agent
- role: node_exporter
when: inventory_hostname in groups["k3s_agent"]
tags:
- node_exporter

View File

@@ -0,0 +1,17 @@
---
- name: Set up Servers
hosts: k3s
gather_facts: true
roles:
- role: common
tags:
- common
when: inventory_hostname in groups["k3s_loadbalancer"]
- role: k3s_loadbalancer
tags:
- k3s_loadbalancer
when: inventory_hostname in groups["k3s_loadbalancer"]
# - role: node_exporter
# tags:
# - node_exporter
# when: inventory_hostname in groups["k3s_loadbalancer"]

View File

@@ -0,0 +1,17 @@
---
- name: Set up Servers
hosts: k3s
gather_facts: true
roles:
- role: common
tags:
- common
when: inventory_hostname in groups["k3s_server"]
- role: k3s_server
tags:
- k3s_server
when: inventory_hostname in groups["k3s_server"]
# - role: node_exporter
# tags:
# - node_exporter
# when: inventory_hostname in groups["k3s_server"]

View File

@@ -1,16 +0,0 @@
---
- name: Set up Servers
hosts: k3s_server
gather_facts: yes
vars_files:
- secrets.yml
roles:
- role: common
tags:
- common
- role: k3s_server
tags:
- k3s_server
- role: node_exporter
tags:
- node_exporter

View File

@@ -0,0 +1,16 @@
- name: Set up storage
hosts: k3s_nodes
gather_facts: true
roles:
- role: common
when: inventory_hostname in groups["k3s_storage"]
tags:
- common
- role: k3s_storage
when: inventory_hostname in groups["k3s_storage"]
tags:
- k3s_storage
# - role: node_exporter
# when: inventory_hostname in groups["k3s_storage"]
# tags:
# - node_exporter

View File

@@ -1,31 +0,0 @@
- name: Set up storage
hosts: k3s_nodes
gather_facts: yes
vars_files:
- secrets.yml
pre_tasks:
- name: Get K3s token from the first server
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
slurp:
src: /var/lib/rancher/k3s/server/node-token
register: k3s_token
become: true
- name: Set fact on k3s.server.ips[0]
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
roles:
- role: common
when: inventory_hostname in groups["k3s_storage"]
tags:
- common
- role: k3s_storage
when: inventory_hostname in groups["k3s_storage"]
k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
tags:
- k3s_storage
- role: node_exporter
when: inventory_hostname in groups["k3s_storage"]
tags:
- node_exporter

View File

@@ -0,0 +1,10 @@
---
- name: Setup Kubernetes Cluster
hosts: kubernetes
any_errors_fatal: true
gather_facts: false
vars:
is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
roles:
- role: kubernetes_argocd
when: is_localhost

View File

@@ -1,16 +0,0 @@
---
- name: Set up Servers
hosts: loadbalancer
gather_facts: yes
vars_files:
- secrets.yml
roles:
- role: common
tags:
- common
- role: loadbalancer
tags:
- loadbalancer
- role: node_exporter
tags:
- node_exporter

View File

@@ -0,0 +1,6 @@
---
- name: Create new VM(s)
ansible.builtin.import_playbook: proxmox.yaml
- name: Provision VM
ansible.builtin.import_playbook: k3s-agents.yaml

View File

@@ -2,8 +2,6 @@
- name: Run proxmox vm playbook
hosts: proxmox
gather_facts: true
vars_files:
- secrets.yml
vars:
is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
is_proxmox_node: "{{ 'proxmox_nodes' in group_names }}"

View File

@@ -1,7 +1,28 @@
cachetools==5.5.2
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
distlib==0.4.0
durationpy==0.10
filelock==3.18.0
google-auth==2.40.3
identify==2.6.12
idna==3.10
kubernetes==33.1.0
nc-dnsapi==0.1.3
nodeenv==1.9.1
oauthlib==3.3.1
platformdirs==4.3.8
pre_commit==4.2.0
proxmoxer==2.2.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9.1
six==1.17.0
urllib3==2.3.0
virtualenv==20.32.0
websocket-client==1.8.0

5
requirements.yaml Normal file
View File

@@ -0,0 +1,5 @@
---
collections:
- name: community.docker
- name: community.general
- name: kubernetes.core

49
roles/common/README.md Normal file
View File

@@ -0,0 +1,49 @@
# Ansible Role: common
This role applies a baseline of common configuration to Debian-based systems.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `vars/main.yml`):
```yaml
# The hostname to configure.
hostname: "new-host"
# A list of extra packages to install.
extra_packages:
- "htop"
- "ncdu"
- "stow"
- "unzip"
```
## Dependencies
None.
## Example Playbook
An example of how to use this role, with variables passed in as parameters:
```yaml
- hosts: servers
roles:
- role: common
hostname: "my-new-host"
extra_packages:
- "vim"
- "curl"
```
## License
MIT
## Author Information
This role was created in 2025 by [TuDatTr](https://codeberg.org/tudattr/).

View File

@@ -16,4 +16,3 @@ TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server

View File

@@ -3,4 +3,4 @@
service:
name: sshd
state: restarted
become: yes
become: true

View File

@@ -79,12 +79,13 @@
path: ~/.config/nvim
register: nvim_config
- name: Clone LazyVim starter to Neovim config directory
- name: Clone personal Neovim config directory
ansible.builtin.git:
repo: https://github.com/LazyVim/starter
repo: https://codeberg.org/tudattr/nvim
dest: ~/.config/nvim
clone: true
update: false
version: 1.0.0
when: not nvim_config.stat.exists
- name: Remove .git directory from Neovim config

View File

@@ -0,0 +1,13 @@
---
- name: Configure Time
ansible.builtin.include_tasks: time.yaml
- name: Configure Packages
ansible.builtin.include_tasks: packages.yaml
- name: Configure Hostname
ansible.builtin.include_tasks: hostname.yaml
- name: Configure Extra-Packages
ansible.builtin.include_tasks: extra_packages.yaml
- name: Configure Bash
ansible.builtin.include_tasks: bash.yaml
- name: Configure SSH
ansible.builtin.include_tasks: sshd.yaml

View File

@@ -1,13 +0,0 @@
---
- name: Configure Time
ansible.builtin.include_tasks: time.yml
- name: Configure Packages
ansible.builtin.include_tasks: packages.yml
- name: Configure Hostname
ansible.builtin.include_tasks: hostname.yml
- name: Configure Extra-Packages
ansible.builtin.include_tasks: extra_packages.yml
- name: Configure Bash
ansible.builtin.include_tasks: bash.yml
- name: Configure SSH
ansible.builtin.include_tasks: sshd.yml

View File

@@ -1,11 +1,11 @@
---
- name: Set timezone to "{{ timezone }}"
- name: Set timezone
community.general.timezone:
name: "{{ timezone }}"
become: true
when: ansible_user_id != "root"
- name: Set timezone to "{{ timezone }}"
- name: Set timezone
community.general.timezone:
name: "{{ timezone }}"
when: ansible_user_id == "root"

View File

@@ -13,3 +13,6 @@ common_packages:
- bat
- fd-find
- ripgrep
- nfs-common
- open-iscsi
- parted

View File

@@ -0,0 +1,85 @@
# Ansible Role: Docker Host
This role sets up a Docker host, installs Docker, and configures it according to the provided variables. It also handles user and group management, directory setup, and deployment of Docker Compose services.
## Role Variables
### General
- `docker_host_package_common_dependencies`: A list of common packages to be installed on the host.
- Default: `nfs-common`, `firmware-misc-nonfree`, `linux-image-amd64`
- `apt_lock_files`: A list of apt lock files to check.
- `arch`: The architecture of the host.
- Default: `arm64` if `ansible_architecture` is `aarch64`, otherwise `amd64`.
### Docker
- `docker.url`: The URL for the Docker repository.
- Default: `https://download.docker.com/linux`
- `docker.apt_release_channel`: The Docker apt release channel.
- Default: `stable`
- `docker.directories.local`: The local directory for Docker data.
- Default: `/opt/local`
- `docker.directories.config`: The directory for Docker configurations.
- Default: `/opt/config`
- `docker.directories.compose`: The directory for Docker Compose files.
- Default: `/opt/compose`
### Keycloak
- `keycloak_config`: A dictionary containing the Keycloak configuration. See `templates/keycloak/realm.json.j2` for more details.
### Services
- `services`: A list of dictionaries, where each dictionary represents a Docker Compose service. See `templates/compose.yaml.j2` for more details.
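For orientation, a single `services` entry looks roughly like this (values illustrative, shape mirroring the repository's vars files):

```yaml
services:
  - name: git
    vm:
      - docker-host01    # hosts this service is deployed to
    container_name: gitea
    image: gitea/gitea:1.23-rootless
    volumes:
      - name: "Data"
        internal: /var/lib/gitea
        external: "{{ docker.directories.local }}/gitea/data"
    ports:
      - name: "http"
        internal: 3000
        external: "{{ services_external_http.git }}"
    environment:
      - USER_UID=1000
      - USER_GID=1000
```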
## Tasks
The role performs the following tasks:
1. **Setup VM**:
- Includes `non-free` and `non-free-firmware` components in the apt sources.
- Installs common packages.
- Removes cloud kernel packages.
- Reboots the host.
2. **Install Docker**:
- Uninstalls old Docker versions.
- Installs dependencies for using repositories over HTTPS.
- Adds the Docker apt key and repository.
- Installs Docker Engine, containerd, and Docker Compose.
3. **Setup user and group for Docker**:
- Ensures the `docker` group exists.
- Adds the `ansible_user_id` to the `docker` group.
- Reboots the host.
4. **Setup directory structure for Docker**:
- Creates necessary directories for Docker and media.
- Sets ownership of the directories.
- Mounts NFS shares.
5. **Deploy configs**:
- Sets up Keycloak realms if the host is a Keycloak host.
6. **Deploy Docker Compose**:
- Copies the Docker Compose file to the target host.
7. **Publish metrics**:
- Copies the `daemon.json` file to `/etc/docker/daemon.json` to enable metrics.
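A sketch of the metrics step, assuming the Prometheus listen address is the only setting (the actual `daemon.json` shipped by the role may contain more):

```yaml
- name: Enable Docker metrics endpoint
  ansible.builtin.copy:
    dest: /etc/docker/daemon.json
    content: |
      { "metrics-addr": "0.0.0.0:9323" }
    mode: "0644"
  become: true
  notify: Restart docker
```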
## Handlers
- `Restart docker`: Restarts the Docker service.
- `Restart compose`: Restarts the Docker Compose services.
- `Restart host`: Reboots the host.
## Usage
To use this role, include it in your playbook and set the required variables.
```yaml
- hosts: docker_hosts
roles:
- role: docker_host
vars:
# Your variables here
```
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details.

View File

@@ -26,6 +26,7 @@
- curl
- gnupg
- lsb-release
- qemu-guest-agent
become: true
- name: Add Docker apt key.

View File

@@ -5,7 +5,6 @@
state: directory
mode: "0755"
loop:
- /media/docker
- /media/series
- /media/movies
- /media/songs
@@ -38,4 +37,5 @@
- /media/series
- /media/movies
- /media/songs
- /media/downloads
become: true

View File

@@ -0,0 +1,21 @@
---
- name: Setup VM
ansible.builtin.include_tasks: 10_setup.yaml
- name: Install docker
ansible.builtin.include_tasks: 20_installation.yaml
- name: Setup user and group for docker
ansible.builtin.include_tasks: 30_user_group_setup.yaml
- name: Setup directory structure for docker
ansible.builtin.include_tasks: 40_directory_setup.yaml
# - name: Deploy configs
# ansible.builtin.include_tasks: 50_provision.yaml
- name: Deploy docker compose
ansible.builtin.include_tasks: 60_deploy_compose.yaml
- name: Publish metrics
ansible.builtin.include_tasks: 70_export.yaml

View File

@@ -1,20 +0,0 @@
---
- name: Setup VM
ansible.builtin.include_tasks: 10_setup.yml
- name: Install docker
ansible.builtin.include_tasks: 20_installation.yml
- name: Setup user and group for docker
ansible.builtin.include_tasks: 30_user_group_setup.yml
- name: Setup directory structure for docker
ansible.builtin.include_tasks: 40_directory_setup.yml
- name: Deploy configs
ansible.builtin.include_tasks: 50_provision.yml
- name: Deploy docker compose
ansible.builtin.include_tasks: 60_deploy_compose.yml
- name: Publish metrics
ansible.builtin.include_tasks: 70_export.yml

View File

@@ -1,7 +1,5 @@
docker_host_package_common_dependencies:
- nfs-common
- firmware-misc-nonfree
- linux-image-amd64
apt_lock_files:
- /var/lib/dpkg/lock

39
roles/k3s_agent/README.md Normal file
View File

@@ -0,0 +1,39 @@
# K3s Agent Ansible Role
This Ansible role installs and configures a K3s agent on a node.
## Role Variables
- `k3s.loadbalancer.default_port`: The port for the K3s load balancer. Defaults to `6443`.
- `k3s_token`: The token for joining the K3s cluster. This is a required variable.
- `hostvars['k3s-loadbalancer'].ansible_default_ipv4.address`: The IP address of the K3s load balancer. This is a required variable.
## Tasks
The main tasks are in `tasks/main.yaml` and `tasks/installation.yaml`.
- **`installation.yml`**:
- Installs `qemu-guest-agent`.
- Checks if K3s is already installed.
- Downloads the K3s installation script to `/tmp/k3s_install.sh`.
- Installs K3s as an agent, connecting to the master.
## Handlers
The main handlers are in `handlers/main.yaml`.
- **`Restart k3s`**: Restarts the `k3s` service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
---
- hosts: k3s_agents
  roles:
    - role: k3s_agent
      vars:
        k3s_token: "your_k3s_token"
        k3s:
          loadbalancer:
            default_port: 6443
```

View File

@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true

View File

@@ -1,4 +1,12 @@
---
- name: Install dependencies for apt to use repositories over HTTPS
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - qemu-guest-agent
  become: true
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
@@ -11,11 +19,11 @@
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s on the secondary servers
- name: Install K3s on agent
  when: not k3s_status.stat.exists
  ansible.builtin.command: |
    /tmp/k3s_install.sh
  environment:
    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
    K3S_TOKEN: "{{ k3s_token }}"
  become: true

View File

@@ -0,0 +1,3 @@
---
- name: Install k3s agent
  include_tasks: installation.yaml

View File

@@ -1,2 +0,0 @@
---
- include_tasks: installation.yml

View File

@@ -0,0 +1,50 @@
# K3s Loadbalancer Ansible Role
This Ansible role configures a load balancer for a K3s cluster using Nginx.
## Role Variables
- `k3s_loadbalancer_nginx_config_path`: The path to the Nginx configuration file. Defaults to `/etc/nginx/nginx.conf`.
- `domain`: The domain name to use for the load balancer. Defaults to `{{ internal_domain }}`.
- `k3s.loadbalancer.default_port`: The default port for the K3s API server. Defaults to `6443`.
- `k3s_server_ips`: A list of IP addresses for the K3s server nodes. This variable is not defined in the role, so you must provide it.
- `netcup_api_key`: Your Netcup API key.
- `netcup_api_password`: Your Netcup API password.
- `netcup_customer_id`: Your Netcup customer ID.
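Since the repository keeps other secrets in Ansible Vault, one reasonable (hypothetical) layout is an encrypted vars file for the Netcup credentials rather than plain-text playbook vars:
```yaml
# Hypothetical file: group_vars/k3s_loadbalancer/vault.yaml,
# encrypted with `ansible-vault encrypt`; all values are placeholders.
netcup_api_key: "your_api_key"
netcup_api_password: "your_api_password"
netcup_customer_id: "your_customer_id"
```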
## Tasks
The role performs the following tasks:
- **Installation:**
  - Updates the `apt` cache.
  - Installs `qemu-guest-agent`.
  - Installs `nginx-full`.
- **Configuration:**
  - Templates the Nginx configuration file with dynamic upstreams for the K3s servers.
  - Enables and starts the Nginx service.
- **DNS Setup:**
  - Sets up a DNS A record for the load balancer using the `community.general.netcup_dns` module.
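A quick smoke test (not part of the role) is to probe the forwarded API port from the control node once Nginx is up, for example:
```yaml
# Hypothetical verification task; assumes the inventory host
# `k3s-loadbalancer` exists and the API is forwarded on 6443.
- name: Wait for the K3s API behind the load balancer
  ansible.builtin.wait_for:
    host: "{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}"
    port: 6443
    timeout: 60
  delegate_to: localhost
```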
## Handlers
- `Restart nginx`: Restarts the Nginx service when the configuration file is changed.
## Example Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_loadbalancer
  roles:
    - role: k3s_loadbalancer
      vars:
        k3s_server_ips:
          - 192.168.1.10
          - 192.168.1.11
          - 192.168.1.12
        netcup_api_key: "your_api_key"
        netcup_api_password: "your_api_password"
        netcup_customer_id: "your_customer_id"
        internal_domain: "example.com"
```

View File

@@ -2,15 +2,13 @@
- name: Template the nginx config file with dynamic upstreams
  ansible.builtin.template:
    src: templates/nginx.conf.j2
    dest: "{{ nginx_config_path }}"
    dest: "{{ k3s_loadbalancer_nginx_config_path }}"
    owner: root
    group: root
    mode: "0644"
  become: true
  notify:
    - Restart nginx
  vars:
    k3s_server_ips: "{{ k3s.server.ips }}"
- name: Enable nginx
  ansible.builtin.systemd:

View File

@@ -4,6 +4,14 @@
    update_cache: true
  become: true
- name: Install dependencies for apt to use repositories over HTTPS
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - qemu-guest-agent
  become: true
- name: Install Nginx
  ansible.builtin.apt:
    name:

View File

@@ -0,0 +1,17 @@
---
- name: Installation
  ansible.builtin.include_tasks: installation.yaml
- name: Configure
  ansible.builtin.include_tasks: configuration.yaml
- name: Setup DNS on Netcup
  community.general.netcup_dns:
    api_key: "{{ netcup_api_key }}"
    api_password: "{{ netcup_api_password }}"
    customer_id: "{{ netcup_customer_id }}"
    domain: "{{ domain }}"
    name: "k3s"
    type: "A"
    value: "{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}"
  delegate_to: localhost

View File

@@ -0,0 +1,98 @@
include /etc/nginx/modules-enabled/*.conf;
events {}
stream {
    upstream k3s_servers {
        {% for ip in k3s_server_ips %}
        server {{ ip }}:{{ k3s.loadbalancer.default_port }};
        {% endfor %}
    }
    server {
        listen {{ k3s.loadbalancer.default_port }};
        proxy_pass k3s_servers;
    }
    upstream etcd_servers {
        {% for ip in k3s_server_ips %}
        server {{ ip }}:2379;
        {% endfor %}
    }
    server {
        listen 2379;
        proxy_pass etcd_servers;
    }
    upstream dns_servers {
        {% for ip in k3s_server_ips %}
        server {{ ip }}:53;
        {% endfor %}
    }
    server {
        listen 53 udp;
        proxy_pass dns_servers;
    }
}
# http {
#     upstream k3s_servers_http {
#         least_conn;
#         {% for ip in k3s_server_ips %}
#         server {{ ip }}:80;
#         {% endfor %}
#     }
#
#     upstream k3s_servers_https {
#         least_conn;
#         {% for ip in k3s_server_ips %}
#         server {{ ip }}:443;
#         {% endfor %}
#     }
#
#     server {
#         listen 80;
#
#         location / {
#             proxy_pass http://k3s_servers_http;
#             proxy_set_header Host $http_host;
#             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#             proxy_set_header X-Forwarded-Proto http;
#         }
#     }
#
#     server {
#         listen 443 ssl;
#
#         server_name staging.k3s.seyshiro.de *.staging.k3s.seyshiro.de;
#
#         ssl_certificate /etc/nginx/ssl/staging_tls.crt;
#         ssl_certificate_key /etc/nginx/ssl/staging_tls.key;
#
#         location / {
#             proxy_pass https://k3s_servers_https;
#             proxy_set_header Host $host;
#             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#             proxy_set_header X-Forwarded-Proto https;
#         }
#     }
#
#     server {
#         listen 443 ssl;
#
#         server_name k3s.seyshiro.de *.k3s.seyshiro.de;
#
#         ssl_certificate /etc/nginx/ssl/production_tls.crt;
#         ssl_certificate_key /etc/nginx/ssl/production_tls.key;
#
#         location / {
#             proxy_pass https://k3s_servers_https;
#             proxy_set_header Host $host;
#             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#             proxy_set_header X-Forwarded-Proto https;
#         }
#     }
# }

View File

@@ -0,0 +1,3 @@
k3s_loadbalancer_nginx_config_path: "/etc/nginx/nginx.conf"
domain: "{{ internal_domain }}"

View File

@@ -0,0 +1,49 @@
# K3s Server Ansible Role
This Ansible role installs and configures a K3s server cluster.
## Role Variables
- `k3s_primary_server_ip`: The IP address of the primary K3s server.
- `k3s_server_name`: The server name for the K3s cluster.
- `k3s_cluster_name`: The name for the K3s cluster in the kubeconfig.
- `k3s_user_name`: The user name for the K3s cluster in the kubeconfig.
- `k3s_context_name`: The context name for the K3s cluster in the kubeconfig.
- `k3s_server_token_vault_file`: The path to the Ansible Vault file containing the K3s token. Default is `../vars/group_vars/k3s/secrets_token.yaml`.
## Tasks
The main tasks are:
1. **Install dependencies**: Installs `qemu-guest-agent`.
2. **Primary Server Installation**:
   - Downloads the K3s installation script.
   - Installs the K3s server on the primary node with TLS SANs for the load balancer address and the server name.
3. **Pull Token**:
   - Retrieves the K3s token from the primary server.
   - Stores the token in an Ansible Vault encrypted file (see the sketch after this list).
4. **Secondary Server Installation**:
   - Installs K3s on the secondary servers, joining them to the cluster using the token from the vault.
5. **Create Kubeconfig**:
   - Slurps the `k3s.yaml` from the primary server.
   - Creates a kubeconfig file on the local machine for accessing the cluster.
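For orientation, the decrypted vault file written in step 3 contains a single key; the token value below is a made-up example of the usual K3s node-token shape:
```yaml
# Decrypted view of vars/group_vars/k3s/secrets_token.yaml
# (the file on disk is ansible-vault encrypted; this token is fake).
k3s_token: "K10abc123...::server:deadbeef"
```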
## Handlers
- `Restart k3s`: Restarts the K3s service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_servers
  roles:
    - role: k3s_server
      vars:
        k3s_primary_server_ip: "192.168.1.100"
        k3s_server_name: "k3s.example.com"
        k3s_cluster_name: "my-k3s-cluster"
        k3s_user_name: "my-k3s-user"
        k3s_context_name: "my-k3s-context"
```

View File

@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true

View File

@@ -0,0 +1,87 @@
---
- name: Slurp original k3s.yaml from primary K3s server
  ansible.builtin.slurp:
    src: /etc/rancher/k3s/k3s.yaml
  register: original_k3s_kubeconfig_slurp
  become: true
- name: Parse original k3s.yaml content to extract cert data
  ansible.builtin.set_fact:
    original_parsed_k3s_kubeconfig: "{{ original_k3s_kubeconfig_slurp.content | b64decode | from_yaml }}"
  delegate_to: localhost
  run_once: true
- name: Set facts for certificate and key data needed by the template
  ansible.builtin.set_fact:
    k3s_server_ca_data: "{{ original_parsed_k3s_kubeconfig.clusters[0].cluster['certificate-authority-data'] }}"
    k3s_client_cert_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-certificate-data'] }}"
    k3s_client_key_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-key-data'] }}"
  delegate_to: localhost
  run_once: true
- name: Decode and save K3s Server CA certificate
  ansible.builtin.copy:
    content: "{{ k3s_server_ca_data | b64decode }}"
    dest: "/tmp/k3s-ca.crt"
    mode: "0644"
  delegate_to: localhost
  become: false
- name: Decode and save K3s Client certificate
  ansible.builtin.copy:
    content: "{{ k3s_client_cert_data | b64decode }}"
    dest: "/tmp/k3s-client.crt"
    mode: "0644"
  delegate_to: localhost
  become: false
- name: Decode and save K3s Client key
  ansible.builtin.copy:
    content: "{{ k3s_client_key_data | b64decode }}"
    dest: "/tmp/k3s-client.key"
    mode: "0600"
  delegate_to: localhost
  become: false
- name: Add K3s cluster to kubeconfig
  ansible.builtin.command: >
    kubectl config set-cluster "{{ k3s_cluster_name }}"
    --server="https://{{ k3s_server_name }}:6443"
    --certificate-authority=/tmp/k3s-ca.crt
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Add K3s user credentials to kubeconfig
  ansible.builtin.command: >
    kubectl config set-credentials "{{ k3s_user_name }}"
    --client-certificate=/tmp/k3s-client.crt
    --client-key=/tmp/k3s-client.key
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Add K3s context to kubeconfig
  ansible.builtin.command: >
    kubectl config set-context "{{ k3s_context_name }}"
    --cluster="{{ k3s_cluster_name }}"
    --user="{{ k3s_user_name }}"
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
- name: Clean up temporary certificate and key files
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - "/tmp/k3s-ca.crt"
    - "/tmp/k3s-client.crt"
    - "/tmp/k3s-client.key"
  delegate_to: localhost
  become: false

View File

@@ -1,55 +0,0 @@
---
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
  register: k3s_status
- name: Download K3s install script to /tmp/
  when: not k3s_status.stat.exists
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s server with node taint and TLS SAN
  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.command: |
    /tmp/k3s_install.sh server \
      --node-taint CriticalAddonsOnly=true:NoExecute \
      --tls-san {{ k3s.loadbalancer.ip }}
  become: true
  async: 300
  poll: 0
  register: k3s_primary_install
- name: Wait for K3s to be installed
  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.async_status:
    jid: "{{ k3s_primary_install.ansible_job_id }}"
  register: k3s_primary_install_status
  until: k3s_primary_install_status.finished
  retries: 60
  delay: 5
  become: true
- name: Get K3s token from the first server
  when: host.ip == k3s.server.ips[0]
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_token
  become: true
- name: Set fact on k3s.server.ips[0]
  when: host.ip == k3s.server.ips[0]
  ansible.builtin.set_fact:
    k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
- name: Install K3s on the secondary servers
  when: (host.ip != k3s.server.ips[0] and (not k3s_status.stat.exists))
  ansible.builtin.command: |
    /tmp/k3s_install.sh server \
      --node-taint CriticalAddonsOnly=true:NoExecute \
      --tls-san {{ k3s.loadbalancer.ip }}
  environment:
    K3S_TOKEN: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
  become: true

View File

@@ -0,0 +1,29 @@
---
- name: Install dependencies for apt to use repositories over HTTPS
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
    update_cache: true
  loop:
    - qemu-guest-agent
  become: true
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
  register: k3s_status
- name: Install primary k3s server
  include_tasks: primary_installation.yaml
  when: ansible_default_ipv4.address == k3s_primary_server_ip
- name: Get token from primary k3s server
  include_tasks: pull_token.yaml
- name: Install secondary k3s servers
  include_tasks: secondary_installation.yaml
  when: ansible_default_ipv4.address != k3s_primary_server_ip
- name: Set kubeconfig on localhost
  include_tasks: create_kubeconfig.yaml
  when: ansible_default_ipv4.address == k3s_primary_server_ip

View File

@@ -1,2 +0,0 @@
---
- include_tasks: installation.yml

View File

@@ -0,0 +1,14 @@
---
- name: Download K3s install script to /tmp/
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s server with cluster-init and TLS SAN
  ansible.builtin.command: |
    /tmp/k3s_install.sh server \
      --cluster-init \
      --tls-san {{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }} \
      --tls-san {{ k3s_server_name }}
  become: true

View File

@@ -0,0 +1,26 @@
- name: Get K3s token from the first server
  when: ansible_default_ipv4.address == k3s_primary_server_ip
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_token
  become: true
- name: Set fact on k3s_primary_server_ip
  ansible.builtin.set_fact:
    k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
  when:
    - ansible_default_ipv4.address == k3s_primary_server_ip
- name: Write K3s token to local file for encryption
  ansible.builtin.copy:
    content: |
      k3s_token: "{{ k3s_token }}"
    dest: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
    mode: "0600"
  delegate_to: localhost
  run_once: true
- name: Encrypt k3s token
  ansible.builtin.shell: cd ../; ansible-vault encrypt "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
  delegate_to: localhost
  run_once: true

View File

@@ -0,0 +1,21 @@
---
- name: Add token vault
  ansible.builtin.include_vars:
    file: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
    name: k3s_token_vault
- name: Download K3s install script to /tmp/
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s on the secondary servers
  ansible.builtin.command: |
    /tmp/k3s_install.sh \
      --server "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}" \
      --tls-san {{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }} \
      --tls-san {{ k3s_server_name }}
  environment:
    K3S_TOKEN: "{{ k3s_token_vault.k3s_token }}"
  become: true

View File

@@ -0,0 +1 @@
k3s_server_token_vault_file: ../vars/group_vars/k3s/secrets_token.yaml

View File

@@ -0,0 +1,39 @@
# k3s_storage Ansible Role
This role installs and configures a k3s node with storage-specific taints and labels.
## Role Variables
- `k3s.loadbalancer.default_port`: The port of the k3s loadbalancer. Defaults to `6443`.
- `k3s_token`: The token to join the k3s cluster. This is a required variable.
- `hostvars['k3s-loadbalancer'].ansible_default_ipv4.address`: The IP address of the k3s loadbalancer. This is discovered automatically from the host with the name `k3s-loadbalancer`.
## Tasks
The main task includes the following files:
- `requirements.yaml`:
  - Updates and upgrades system packages.
  - Installs `open-iscsi` and `nfs-common`.
  - Starts and enables the `open-iscsi` service.
- `installation.yaml`:
  - Downloads the k3s installation script.
  - Installs k3s on the node with the following configuration:
    - Taint: `storage=true:NoExecute`
    - Label: `longhorn=true`
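For a workload to schedule onto such a node, it has to tolerate the taint and (typically) select the label. A hypothetical pod sketch, not part of the role:
```yaml
# Hypothetical pod; the name and image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: storage-example
spec:
  nodeSelector:
    longhorn: "true"
  tolerations:
    - key: storage
      operator: Equal
      value: "true"
      effect: NoExecute
  containers:
    - name: app
      image: nginx
```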
## Handlers
- `Restart k3s`: Restarts the k3s service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_storage_nodes
  roles:
    - role: k3s_storage
      vars:
        k3s_token: "your_k3s_token"
```

View File

@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true

View File

@@ -18,6 +18,6 @@
      --node-taint storage=true:NoExecute \
      --node-label longhorn=true
  environment:
    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
    K3S_TOKEN: "{{ k3s_token }}"
  become: true

View File

@@ -0,0 +1,5 @@
---
- name: Install dependencies
  ansible.builtin.include_tasks: requirements.yaml
- name: Install k3s
  ansible.builtin.include_tasks: installation.yaml

View File

@@ -1,5 +0,0 @@
---
- name: Install dependencies
  ansible.builtin.include_tasks: requirements.yml
- name: Install k3s
  ansible.builtin.include_tasks: installation.yml

View File

@@ -0,0 +1,44 @@
# Ansible Role: ArgoCD
This role installs and configures ArgoCD in a Kubernetes cluster.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yaml`):
| Variable | Default | Description |
|---|---|---|
| `argocd_version` | `stable` | The version of ArgoCD to install. |
| `argocd_namespace` | `argocd` | The namespace where ArgoCD will be installed. |
| `argocd_repo` | `https://raw.githubusercontent.com/argoproj/argo-cd/refs/tags/{{ argocd_version }}/manifests/ha/install.yaml` | The URL to the ArgoCD installation manifests. |
| `argocd_git_repository` | `https://github.com/argocd/argocd` | The Git repository URL for ArgoCD to use. |
| `argocd_git_username` | `user` | The username for the Git repository. |
| `argocd_git_pat` | `token` | The personal access token for the Git repository. |
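To pin a concrete release instead of `stable`, override the version variable; the tag below is only an example:
```yaml
# Hypothetical override; pick a real ArgoCD release tag.
argocd_version: v2.11.3
```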
## Tasks
The following tasks are performed by this role:
- **Install ArgoCD**: Creates the ArgoCD namespace and applies the installation manifests.
- **Apply ArgoCD Ingress**: Applies an Ingress resource for the ArgoCD server. **Note:** The template file `ingress.yaml.j2` is missing from the role.
- **Apply ArgoCD CM**: Applies a ConfigMap with command parameters for ArgoCD.
- **Apply ArgoCD repository**: Creates a Secret with Git repository credentials.
- **Apply ArgoCD Root Application**: Creates a root Application resource for ArgoCD.
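The root Application syncs whatever lives under `cluster-apps/` in the configured Git repository, so child applications are plain manifests committed there. A hypothetical child Application (names, paths, and repo URL are placeholders):
```yaml
# Hypothetical child app committed to cluster-apps/ in the Git repo.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-service
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/my-org/my-repo.git
    targetRevision: HEAD
    path: apps/my-service
  destination:
    server: https://kubernetes.default.svc
    namespace: my-service
  syncPolicy:
    automated: {}
```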
## Handlers
There are no handlers in this role.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: kubernetes
  roles:
    - role: kubernetes_argocd
      vars:
        argocd_git_repository: "https://github.com/my-org/my-repo.git"
        argocd_git_username: "my-user"
        argocd_git_pat: "my-token"
```

View File

@@ -0,0 +1,8 @@
---
argocd_version: stable
argocd_namespace: argocd
argocd_repo: "https://raw.githubusercontent.com/argoproj/argo-cd/refs/tags/{{ argocd_version }}/manifests/ha/install.yaml"
argocd_git_repository: https://github.com/argocd/argocd
argocd_git_username: "user"
argocd_git_pat: "token"

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  labels:
    app.kubernetes.io/name: argocd-cmd-params-cm
    app.kubernetes.io/part-of: argocd
data:
  redis.server: "argocd-redis-ha-haproxy:6379"
  server.insecure: "true"

View File

@@ -0,0 +1,72 @@
---
- name: Install ArgoCD
  block:
    - name: Create ArgoCD namespace
      kubernetes.core.k8s:
        name: "{{ argocd_namespace }}"
        api_version: v1
        kind: Namespace
        state: present
    - name: Apply ArgoCD manifests
      kubernetes.core.k8s:
        src: "{{ argocd_repo }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10
    - name: Wait for ArgoCD server to be ready
      kubernetes.core.k8s_info:
        api_version: apps/v1
        kind: Deployment
        name: argocd-server
        namespace: "{{ argocd_namespace }}"
      register: rollout_status
      until: >
        rollout_status.resources[0].status.readyReplicas is defined and
        rollout_status.resources[0].status.readyReplicas == rollout_status.resources[0].spec.replicas
      retries: 30
      delay: 10
    - name: Apply ArgoCD Ingress
      kubernetes.core.k8s:
        definition: "{{ lookup('ansible.builtin.template', 'ingress.yaml.j2') | from_yaml }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10
    - name: Apply ArgoCD CM
      kubernetes.core.k8s:
        src: "files/argocd-cmd-params-cm.yaml"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10
    - name: Apply ArgoCD repository
      kubernetes.core.k8s:
        definition: "{{ lookup('ansible.builtin.template', 'repository.yaml.j2') | from_yaml }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10
    - name: Apply ArgoCD Root Application
      kubernetes.core.k8s:
        definition: "{{ lookup('ansible.builtin.template', 'root_application.yaml.j2') | from_yaml }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: argocd-repository-https
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  url: {{ argocd_git_repository }}
  username: {{ argocd_git_username }}
  password: {{ argocd_git_pat }}

View File

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: homelab-gitops-root
  namespace: argocd
  labels:
    app.kubernetes.io/name: argocd-root
    app.kubernetes.io/instance: homelab
    app.kubernetes.io/component: gitops-bootstrap
spec:
  project: default
  source:
    repoURL: {{ argocd_git_repository }}
    targetRevision: HEAD
    path: cluster-apps/
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - ApplyOutOfSyncOnly=true

View File

@@ -1,5 +0,0 @@
---
- name: Installation
  ansible.builtin.include_tasks: installation.yml
- name: Configure
  ansible.builtin.include_tasks: configuration.yml

View File

@@ -1,89 +0,0 @@
include /etc/nginx/modules-enabled/*.conf;
events {}
stream {
    # TCP Load Balancing for the K3s API
    upstream k3s_servers {
        {% for ip in k3s_server_ips %}
        server {{ ip }}:{{ k3s.loadbalancer.default_port }};
        {% endfor %}
    }
    server {
        listen {{ k3s.loadbalancer.default_port }};
        proxy_pass k3s_servers;
    }
    upstream dns_servers {
        {% for ip in k3s_server_ips %}
        server {{ ip }}:53;
        {% endfor %}
    }
    server {
        listen 53 udp;
        proxy_pass dns_servers;
    }
}
http {
    upstream k3s_servers_http {
        least_conn;
        {% for ip in k3s_server_ips %}
        server {{ ip }}:80;
        {% endfor %}
    }
    upstream k3s_servers_https {
        least_conn;
        {% for ip in k3s_server_ips %}
        server {{ ip }}:443;
        {% endfor %}
    }
    server {
        listen 80;
        location / {
            proxy_pass http://k3s_servers_http;
            proxy_set_header Host $http_host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto http;
        }
    }
    server {
        listen 443 ssl;
        server_name staging.k3s.seyshiro.de *.staging.k3s.seyshiro.de;
        ssl_certificate /etc/nginx/ssl/staging_tls.crt;
        ssl_certificate_key /etc/nginx/ssl/staging_tls.key;
        location / {
            proxy_pass https://k3s_servers_https;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
        }
    }
    server {
        listen 443 ssl;
        server_name k3s.seyshiro.de *.k3s.seyshiro.de;
        ssl_certificate /etc/nginx/ssl/production_tls.crt;
        ssl_certificate_key /etc/nginx/ssl/production_tls.key;
        location / {
            proxy_pass https://k3s_servers_https;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
        }
    }
}

View File

@@ -1 +0,0 @@
nginx_config_path: "/etc/nginx/nginx.conf"

Some files were not shown because too many files have changed in this diff.