34 Commits

Author SHA1 Message Date
Tuan-Dat Tran
6eef96b302 feat(pre-commit): Added linting 2025-07-27 22:46:23 +02:00
Tuan-Dat Tran
2882abfc0b Added README.md for roles
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:40:46 +02:00
Tuan-Dat Tran
2b759cc2ab Update README.md
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 16:16:35 +02:00
Tuan-Dat Tran
dbaebaee80 cleanup: services moved to argocd
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-27 13:58:25 +02:00
Tuan-Dat Tran
89c51aa45c feat(argo): app-of-app argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-25 07:58:41 +02:00
Tuan-Dat Tran
0139850ee3 feat(reverse_proxy): fix caddy letsencrypt
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 21:26:11 +02:00
Tuan-Dat Tran
976cad51e2 refactor(k3s): enhance cluster setup and enable ArgoCD apps
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-22 07:23:23 +02:00
Tuan-Dat Tran
e1a2248154 feat(kubernetes): add nfs-provisioner
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-15 23:24:52 +02:00
Tuan-Dat Tran
d8fd094379 feat(kubernetes): stable kubernetes with argo
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-14 22:57:13 +02:00
Tuan-Dat Tran
76000f8123 feat(kubernetes): add initial setup for ArgoCD, Cert-Manager, MetalLB, and Traefik
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 14:25:53 +02:00
Tuan-Dat Tran
4aa939426b refactor(k3s): enhance kubeconfig generation and token management
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 09:33:39 +02:00
Tuan-Dat Tran
9cce71f73b refactor(k3s): manage token securely and install guest agent
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 02:15:01 +02:00
Tuan-Dat Tran
97a5d6c41d refactor(k3s): centralize k3s primary server IP and integrate Netcup DNS
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 01:30:05 +02:00
Tuan-Dat Tran
f1b0cfad2c refactor(k3s): streamline inventory and primary server IP handling
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-13 00:40:48 +02:00
Tuan-Dat Tran
dac0d88d60 feat(proxmox): add k3s agents and refine VM provisioning
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 23:08:44 +02:00
Tuan-Dat Tran
609e000089 refactor(ansible): centralize inventory and variables in 'vars' directory
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 21:38:53 +02:00
Tuan-Dat Tran
3d7f652ff3 refactor(ansible): restructure inventory and remove postgres role
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-07-12 20:35:26 +02:00
Tuan-Dat Tran
cb8ccd8f00 wip
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-06-07 01:19:27 +02:00
Tuan-Dat Tran
02168225b1 wip
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-06-07 00:16:54 +02:00
Tuan-Dat Tran
6ff1ccecd0 refactor(infra): reorganize docker host VMs and service assignments
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-07 00:02:30 +02:00
Tuan-Dat Tran
de62327fde Add naruto01 to proxmox nodes
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-06 13:33:46 +02:00
Tuan-Dat Tran
b70c8408dc 2025-05-03T21:41+02:00
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-03 21:41:32 +02:00
Tuan-Dat Tran
a913e1cbc0 refactor: reorganize proxmox roles, add hardware acceleration, and update common config tasks
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-05-03 10:24:50 +02:00
Tuan-Dat Tran
e3c67a32e9 feat(reverse_proxy): add Netcup DNS ACME challenge support and refactor Caddy setup
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-28 23:24:29 +02:00
Tuan-Dat Tran
8f2998abc0 refactor(ansible): use ansible_user_id and add root package condition
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-27 18:15:07 +02:00
Tuan-Dat Tran
7fcee3912f refactor(ansible): refactor common role application and improve vm ssh config
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-27 17:46:41 +02:00
Tuan-Dat Tran
591342f580 feat(proxmox): refactor vm provisioning and add pci passthrough config
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 23:34:42 +02:00
Tuan-Dat Tran
f2ea03bc01 feat(proxmox): automatic vm creation
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 21:58:58 +02:00
Tuan-Dat Tran
0e8e07ed3e feat(docker): Added healthcheck
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-26 13:21:02 +02:00
Tuan-Dat Tran
a2a58f6343 feat(keycloak|docker): improved templating
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-25 23:37:24 +02:00
Tuan-Dat Tran
42196a32dc feat(docker): Add karakeep and keycloak services
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-24 20:24:33 +02:00
Tuan-Dat Tran
6934a9f5fc distributed secrets to group_vars and added karakeep
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-06 23:46:28 +02:00
Tuan-Dat Tran
27621aac03 Added proxmox-vm and static tagging of docker images
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-04-06 18:04:33 +02:00
Tuan-Dat Tran
56f058c254 moved ssh to cert based
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
2025-03-25 01:09:08 +01:00
175 changed files with 2907 additions and 1237 deletions

31
.ansible-lint Normal file
View File

@@ -0,0 +1,31 @@
---
# .ansible-lint
# Specify exclude paths to prevent linting vendor roles, etc.
exclude_paths:
- ./.git/
- ./.venv/
- ./galaxy_roles/
# A list of rules to skip.
skip_list:
- experimental
- fqcn-builtins
- no-handler
- var-naming
# Enforce certain rules that are not enabled by default.
enable_list:
- no-free-form
- var-spacing
- no-log-password
- no-relative-path
- command-instead-of-module
- fqcn[deep]
- no-changed-when
# Offline mode disables any features that require internet access.
offline: false
# Set the desired verbosity level.
verbosity: 1

17
.editorconfig Normal file
View File

@@ -0,0 +1,17 @@
root = true
[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.{yml,yaml}]
indent_size = 2
[*.py]
indent_size = 4
[*.md]
trim_trailing_whitespace = false

23
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,23 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: local
    hooks:
      - id: ansible-galaxy-install
        name: Install ansible-galaxy collections
        entry: ansible-galaxy collection install -r requirements.yml
        language: system
        pass_filenames: false
        always_run: true
  - repo: https://github.com/ansible/ansible-lint
    rev: v6.22.2
    hooks:
      - id: ansible-lint
        files: \.(yaml|yml)$
        additional_dependencies:
          - ansible-core==2.15.8

View File

@@ -2,36 +2,41 @@
**I do not recommend using this project for one's own infrastructure, as
it is heavily attuned to my specific host/network setup.**
The Ansible project to provision fresh Debian VMs for my Proxmox instances.
Some values, such as the public key, are hard-coded in both
[./scripts/debian_seed.sh](./scripts/debian_seed.sh) and [./group_vars/all/vars.yml](./group_vars/all/vars.yml).
## Prerequisites
## Configuration
- [secrets.yml](secrets.yml) in the root directory of this repository.
Skeleton file can be found as [./secrets.yml.skeleton](./secrets.yml.skeleton).
- IP Configuration of hosts like in [./host_vars/\*](./host_vars/*)
- Setup [~/.ssh/config](~/.ssh/config) for the respective hosts used.
- Install `passlib` for your operating system; it is needed to hash passwords ad hoc.
The configuration of this project is done via files in the `./vars` directory.
The inventory is composed of `.ini` files in the `./vars` directory. Each `.ini` file represents an inventory and can be used with the `-i` flag when running playbooks.
## Improvable Variables
The variables for the hosts and groups are defined in the `./vars/group_vars` directory. The structure of this directory is as follows:
- `group_vars/k3s/vars.yml`:
- `k3s.server.ips`: should be derived from the IPs in the `k3s_server*.yml` host_vars files instead of being hard-coded (see the sketch below).
- `k3s_db_connection_string`: should be embedded under the `k3s.db` dictionary, but doing so currently causes a recursive variable loop.
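A minimal sketch of that derivation, assuming every k3s server belongs to a `k3s_server` inventory group and defines `ansible_host` in its host_vars:
```yaml
# group_vars/k3s/vars.yml — sketch, not the current implementation
k3s_server_ips: "{{ groups['k3s_server'] | map('extract', hostvars, 'ansible_host') | list }}"
```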
```
vars/
├── group_vars/
│   ├── all/
│   │   ├── secrets.yml
│   │   └── vars.yml
│   ├── <group_name>/
│   │   ├── *.yml
├── docker.ini
├── k3s.ini
├── kubernetes.ini
├── proxmox.ini
└── vps.ini
```
The `all` group contains variables that are common to all hosts. Each other directory in `group_vars` corresponds to a group defined in the inventory files and contains variables specific to that group.
## Run Playbook
To run a first playbook and test the setup, the following command can be executed.
To run a playbook, you need to specify the inventory file and the playbook file. For example, to run the `k3s-servers.yml` playbook with the `k3s.ini` inventory, you can use the following command:
```sh
ansible-playbook -i production -J k3s-servers.yml
ansible-playbook -i vars/k3s.ini playbooks/k3s-servers.yml
```
This will run the [./k3s-servers.yml](./k3s-servers.yml) playbook and execute
its roles.
## After successful k3s installation
To access our Kubernetes cluster from our host machine to work on it via
@@ -72,3 +77,16 @@ sudo vgextend k3s-vg /dev/sda3
# Use the newly available storage in the root volume
sudo lvresize -l +100%FREE -r /dev/k3s-vg/root
```
## Cloud Init VMs
```sh
# On Hypervisor Host
qm resize <vmid> scsi0 +32G
# On VM
sudo fdisk -l /dev/sda # To check
echo 1 | sudo tee /sys/class/block/sda/device/rescan
sudo fdisk -l /dev/sda # To check
# sudo apt-get install cloud-guest-utils
sudo growpart /dev/sda 1
```

View File

@@ -1,9 +1,12 @@
[defaults]
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
interpreter_python=python3
# (pathspec) Colon separated paths in which Ansible will search for Roles.
roles_path=./roles
# (pathlist) Comma separated list of Ansible inventory sources
inventory=./inventory/production
inventory=./vars/
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
# If executable, it will be run and the resulting stdout will be used as the password.
@@ -33,3 +36,6 @@ skip=dark gray
[tags]
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=
[inventory]
ignore_extensions={{(REJECT_EXTS + ('.orig', '.cfg', '.retry', '.bak'))}}

View File

@@ -688,4 +688,3 @@
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=

View File

@@ -1,56 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
34623331393561623539666362643966336661326136363431666465356535343663376236663066
3235363061633666626133313363373336656438633566630a383230393161323862303863656464
61633861323966343263363466343130306635343539326464363637383139343033656130336464
3163373535613961340a643335626165306663363063656339653862393533633534366331336231
63393432383731633463323164333831313535373261336166326237306230326465616239306536
37663863663161393130373835373062393866633864373465333937633838303130386334356566
64303663303862623038646235303934376230393538353466393232363764366339616633343433
65343730663864393766313134653335396562646135306637613031333461613965666465376532
32643261626665396338313836633337383932616265613662383132303539623239623965333966
66333638643635313262616434396164313833303065303662303736303232346535613834643435
32316434343231363662393163353832393166643739396165313631363539663439316133616361
61623830613035396333303363383332653736666231343763353666356539633433373066613330
65656631343764323234333161636632616130353139626362343361386535313336666566636464
35323434656439346262336335383366626565333765343562633236636132636532333761663535
31383565313436633438633336306430343733663539666631386532313836623166356332626664
39653762353265643861633237326662383466373539633732323833376238383963393837636466
66656631666131623166393731643537393161303636353932653062363137376334356238643064
34303666656638396263336639636135393536623037666137653132633264316431656438386432
34333632616265343435306365373039653036353337633563393739653632656163316636363336
32346638393364353634386231616639386164326531353134366639653837653236333030666139
64656334336231636337656233383834343763393738643362626665333362353335656131653165
35376330336433383262653039643131313437643265343663626363373439643932643063646439
37663630363839643263373630646430386536346132383564396463376361343661346661333636
39643961643031626462363537633263393838363262626439313838313039373035373634633462
38363938343932626131343966616638323632303636383034383536616164393539343635666166
39383434313863356434383961383139623436636230323866396366326665623863336438623335
33346634303639643131333933363838666336306438646335343931366437326462376438663837
34353938343837663930356464373332356530643231653166616331376335643832316365303164
32393062313638393936393863613731363233376537323834623164613231393133353635623866
35626337336562653265613730363961633662653331663966333430343462666535306133663835
64663539303765366331613666653632313233626231313264346332323266653230323332373836
33303564633464333064613431383230383535633362373839323334353162623433646230393838
33306162613739393338373361616634396636313765326465393332396537613263383339626666
63613162616363363138323965373966353366323463313934356530663931653565656164346363
37633862366436623030303233396639393434336438623433383530393836626164353064366432
35303532393437316162346366346636633135383938323631316563323935383561326335323438
30613266643232656138663431666162663330643133643263343237663565323231316239633037
39323732386236396136633539383335646634306139643533666636633131623566333137376236
39616134306463613864353135313636343365643437323465643862303137663937376233306261
31383862356535646563383438396363323838613237623034656561396163376433663262366137
63323562346633303162666530616534386539383238366139376263326265343138373139393432
35643335363139373139666230626363386232316536306431653964376333366235303763336135
65623231336638643034373932376263636336653561646664366138643031316438316465353363
38386539363631393433313664323135646562313537376236653635303263633230383866653039
66636534336234363438363139366531653237323137613961383831376665626365393462363834
36333965366463636233643433616431376436323535396238363933326363333661326462353161
66626435373938633832393662313161663336613862343332643766333633653866316464653735
31356135363662633961386264613836323435323836386635336338353663333137336666323531
36663731336664633763633634613136663866363530613264356431326539316530326161313362
62616539356537353261343464356334636134396664353463623163313765633432653932346136
32326239373333643461333733646264353238356134613037663836643131316664653539643839
30613235623933356565336630323939633266613164306262386666363137666661666131613962
61623930663536646462343264336535353634373833316537613839396566376466653736333830
33376663613063326230346439626237373232656665633832373364653931663361666432303166
663564323132383864336332363139393534

View File

@@ -1,36 +0,0 @@
#
# Essential
#
root: root
user: tudattr
timezone: Europe/Berlin
puid: "1000"
pgid: "1000"
pk_path: "/media/veracrypt1/genesis"
pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqc9fnzfCz8fQDFzla+D8PBhvaMmFu2aF+TYkkZRxl9 tuan@genesis-2022-01-20"
public_domain: tudattr.dev
internal_domain: seyshiro.de
#
# Packages
#
common_packages:
- build-essential
- curl
- git
- iperf3
- neovim
- rsync
- smartmontools
- sudo
- systemd-timesyncd
- tree
- screen
- bat
- fd-find
- ripgrep
arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"

View File

@@ -1,28 +0,0 @@
db:
  default_user:
    user: "postgres"
  name: "k3s"
  user: "k3s"
  password: "{{ vault.k3s.postgres.db.password }}"
  listen_address: "{{ k3s.db.ip }}"
k3s:
  net: "192.168.20.0/24"
  server:
    ips:
      - 192.168.20.21
      - 192.168.20.24
      - 192.168.20.30
  loadbalancer:
    ip: 192.168.20.22
    default_port: 6443
  db:
    ip: 192.168.20.23
    default_port: "5432"
  agent:
    ips:
      - 192.168.20.25
      - 192.168.20.26
      - 192.168.20.27
k3s_db_connection_string: "postgres://{{ db.user }}:{{ db.password }}@{{ k3s.db.ip }}:{{ k3s.db.default_port }}/{{ db.name }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.12
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.aya01.root.sudo }}"
host:
hostname: "aya01"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.34
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host00.sudo }}"
host:
hostname: "docker-host00"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.35
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host01.sudo }}"
host:
hostname: "docker-host01"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.36
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host02.sudo }}"
host:
hostname: "docker-host02"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.37
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.lb.sudo }}"
host:
hostname: "docker-lb"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.14
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.inko.root.sudo }}"
host:
hostname: "inko"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.25
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent00.sudo }}"
host:
hostname: "k3s-agent00"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.26
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent01.sudo }}"
host:
hostname: "k3s-agent01"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.27
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent02.sudo }}"
host:
hostname: "k3s-agent02"
ip: "{{ ansible_host }}"

View File

@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.22
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.loadbalancer.sudo }}"
host:
hostname: "k3s-loadbalancer"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.32
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn00.sudo }}"
host:
hostname: "k3s-longhorn00"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.33
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn01.sudo }}"
host:
hostname: "k3s-longhorn01"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.31
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn02.sudo }}"
host:
hostname: "k3s-longhorn02"
ip: "{{ ansible_host }}"

View File

@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.23
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.postgres.sudo }}"
host:
hostname: "k3s-postgres"
ip: "{{ ansible_host }}"

View File

@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.21
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server00.sudo }}"
host:
hostname: "k3s-server00"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.24
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
host:
hostname: "k3s-server01"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.30
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server02.sudo }}"
host:
hostname: "k3s-server02"
ip: "{{ ansible_host }}"

View File

@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.28
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.lulu.root.sudo }}"
host:
hostname: "lulu"
ip: "{{ ansible_host }}"

View File

@@ -1,55 +0,0 @@
[proxmox]
aya01
lulu
inko
[k3s]
k3s-postgres
k3s-loadbalancer
k3s-server[00:02]
k3s-agent[00:02]
k3s-longhorn[00:02]
[vm]
k3s-postgres
k3s-loadbalancer
k3s-agent[00:02]
k3s-server[00:02]
k3s-longhorn[00:02]
docker-host[00:02]
[k3s_nodes]
k3s-server[00:02]
k3s-agent[00:02]
k3s-longhorn[00:02]
[docker]
docker-host[00:02]
docker-lb
[vps]
mii
[k3s_server]
k3s-server[00:02]
[k3s_agent]
k3s-agent[00:02]
[k3s_storage]
k3s-longhorn[00:02]
[db]
k3s-postgres
[loadbalancer]
k3s-loadbalancer
[docker_host]
docker-host[00:02]
[docker_lb]
docker-lb
[vm:vars]
ansible_ssh_common_args='-o ProxyCommand="ssh -p 22 -W %h:%p -q aya01"'

View File

@@ -1,10 +0,0 @@
---
- name: Run the common role on k3s
  hosts: k3s
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common

View File

@@ -1,19 +0,0 @@
---
- name: Set up Servers
  hosts: db
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: postgres
      tags:
        - postgres
    - role: node_exporter
      tags:
        - node_exporter
    - role: postgres_exporter
      tags:
        - postgres_exporter

View File

@@ -1,9 +1,7 @@
---
- name: Set up Servers
  hosts: docker_host
  gather_facts: yes
  vars_files:
    - secrets.yml
  gather_facts: true
  roles:
    - role: common
      tags:

View File

@@ -1,13 +1,13 @@
---
- name: Set up reverse proxy for docker
  hosts: docker_lb
  gather_facts: yes
  vars_files:
    - secrets.yml
  hosts: docker
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["docker_lb"]
    - role: reverse_proxy
      tags:
        - reverse_proxy
      when: inventory_hostname in groups["docker_lb"]

5
playbooks/docker.yml Normal file
View File

@@ -0,0 +1,5 @@
---
- name: Setup Docker Hosts
  ansible.builtin.import_playbook: docker-host.yml
- name: Setup Docker load balancer
  ansible.builtin.import_playbook: docker-lb.yml

View File

@@ -1,20 +1,6 @@
- name: Set up Agents
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true
    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_agent"]
@@ -22,10 +8,9 @@
        - common
    - role: k3s_agent
      when: inventory_hostname in groups["k3s_agent"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_agent
    - role: node_exporter
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - node_exporter
    # - role: node_exporter
    #   when: inventory_hostname in groups["k3s_agent"]
    #   tags:
    #     - node_exporter
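The `k3s_token` expression above walks every entry in `hostvars` to find the host whose `host.ip` equals `k3s.server.ips[0]`, then reads the token fact registered there. Under the assumption that this first server is always `k3s-server00` (as in the current inventory), an equivalent but far simpler sketch would be:
```yaml
# Hypothetical simplification; assumes k3s.server.ips[0] always belongs to k3s-server00.
k3s_token: "{{ hostvars['k3s-server00'].k3s_token }}"
```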

View File

@@ -0,0 +1,17 @@
---
- name: Set up Servers
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["k3s_loadbalancer"]
    - role: k3s_loadbalancer
      tags:
        - k3s_loadbalancer
      when: inventory_hostname in groups["k3s_loadbalancer"]
    # - role: node_exporter
    #   tags:
    #     - node_exporter
    #   when: inventory_hostname in groups["k3s_loadbalancer"]

View File

@@ -1,16 +1,17 @@
---
- name: Set up Servers
  hosts: k3s_server
  gather_facts: yes
  vars_files:
    - secrets.yml
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
    # - role: common
    #   tags:
    #     - common
    #   when: inventory_hostname in groups["k3s_server"]
    - role: k3s_server
      tags:
        - k3s_server
    - role: node_exporter
      tags:
        - node_exporter
      when: inventory_hostname in groups["k3s_server"]
    # - role: node_exporter
    #   tags:
    #     - node_exporter
    #   when: inventory_hostname in groups["k3s_server"]

View File

@@ -1,20 +1,6 @@
- name: Set up storage
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true
    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_storage"]
@@ -22,10 +8,9 @@
        - common
    - role: k3s_storage
      when: inventory_hostname in groups["k3s_storage"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_storage
    - role: node_exporter
      when: inventory_hostname in groups["k3s_storage"]
      tags:
        - node_exporter
    # - role: node_exporter
    #   when: inventory_hostname in groups["k3s_storage"]
    #   tags:
    #     - node_exporter

View File

@@ -0,0 +1,10 @@
---
- name: Setup Kubernetes Cluster
  hosts: kubernetes
  any_errors_fatal: true
  gather_facts: false
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
  roles:
    - role: kubernetes_argocd
      when: is_localhost

View File

@@ -1,16 +0,0 @@
---
- name: Set up Servers
  hosts: loadbalancer
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: loadbalancer
      tags:
        - loadbalancer
    - role: node_exporter
      tags:
        - node_exporter

15
playbooks/proxmox.yml Normal file
View File

@@ -0,0 +1,15 @@
---
- name: Run proxmox vm playbook
  hosts: proxmox
  gather_facts: true
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
    is_proxmox_node: "{{ 'proxmox_nodes' in group_names }}"
  roles:
    - role: common
      tags:
        - common
      when: not is_localhost
    - role: proxmox
      tags:
        - proxmox

View File

@@ -1,9 +0,0 @@
---
- hosts: db
  gather_facts: yes
  vars_files:
    - secrets.yml
  tasks:
    - name: Print the database connection string
      debug:
        msg: "{{ k3s_db_connection_string }}"

28
requirements.txt Normal file
View File

@@ -0,0 +1,28 @@
cachetools==5.5.2
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
distlib==0.4.0
durationpy==0.10
filelock==3.18.0
google-auth==2.40.3
identify==2.6.12
idna==3.10
kubernetes==33.1.0
nc-dnsapi==0.1.3
nodeenv==1.9.1
oauthlib==3.3.1
platformdirs==4.3.8
pre_commit==4.2.0
proxmoxer==2.2.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9.1
six==1.17.0
urllib3==2.3.0
virtualenv==20.32.0
websocket-client==1.8.0

5
requirements.yml Normal file
View File

@@ -0,0 +1,5 @@
---
collections:
- name: community.docker
- name: community.general
- name: kubernetes.core

49
roles/common/README.md Normal file
View File

@@ -0,0 +1,49 @@
# Ansible Role: common
This role applies a baseline configuration to Debian-based systems.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `vars/main.yml`):
```yaml
# The hostname to configure.
hostname: "new-host"
# A list of extra packages to install.
extra_packages:
- "htop"
- "ncdu"
- "stow"
- "unzip"
```
## Dependencies
None.
## Example Playbook
An example of how to use this role, with variables passed in as parameters:
```yaml
- hosts: servers
  roles:
    - role: common
      hostname: "my-new-host"
      extra_packages:
        - "vim"
        - "curl"
```
## License
MIT
## Author Information
This role was created in 2025 by [TuDatTr](https://codeberg.org/tudattr/).

View File

@@ -0,0 +1,80 @@
xterm-ghostty|ghostty|Ghostty,
am, bce, ccc, hs, km, mc5i, mir, msgr, npc, xenl, AX, Su, Tc, XT, fullkbd,
colors#0x100, cols#80, it#8, lines#24, pairs#0x7fff,
acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, civis=\E[?25l,
clear=\E[H\E[2J, cnorm=\E[?12l\E[?25h, cr=\r,
csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
cud=\E[%p1%dB, cud1=\n, cuf=\E[%p1%dC, cuf1=\E[C,
cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
cvvis=\E[?12;25h, dch=\E[%p1%dP, dch1=\E[P, dim=\E[2m,
dl=\E[%p1%dM, dl1=\E[M, dsl=\E]2;\007, ech=\E[%p1%dX,
ed=\E[J, el=\E[K, el1=\E[1K, flash=\E[?5h$<100/>\E[?5l,
fsl=^G, home=\E[H, hpa=\E[%i%p1%dG, ht=^I, hts=\EH,
ich=\E[%p1%d@, ich1=\E[@, il=\E[%p1%dL, il1=\E[L, ind=\n,
indn=\E[%p1%dS,
initc=\E]4;%p1%d;rgb:%p2%{255}%*%{1000}%/%2.2X/%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\E\\,
invis=\E[8m, kDC=\E[3;2~, kEND=\E[1;2F, kHOM=\E[1;2H,
kIC=\E[2;2~, kLFT=\E[1;2D, kNXT=\E[6;2~, kPRV=\E[5;2~,
kRIT=\E[1;2C, kbs=^?, kcbt=\E[Z, kcub1=\EOD, kcud1=\EOB,
kcuf1=\EOC, kcuu1=\EOA, kdch1=\E[3~, kend=\EOF, kent=\EOM,
kf1=\EOP, kf10=\E[21~, kf11=\E[23~, kf12=\E[24~,
kf13=\E[1;2P, kf14=\E[1;2Q, kf15=\E[1;2R, kf16=\E[1;2S,
kf17=\E[15;2~, kf18=\E[17;2~, kf19=\E[18;2~, kf2=\EOQ,
kf20=\E[19;2~, kf21=\E[20;2~, kf22=\E[21;2~,
kf23=\E[23;2~, kf24=\E[24;2~, kf25=\E[1;5P, kf26=\E[1;5Q,
kf27=\E[1;5R, kf28=\E[1;5S, kf29=\E[15;5~, kf3=\EOR,
kf30=\E[17;5~, kf31=\E[18;5~, kf32=\E[19;5~,
kf33=\E[20;5~, kf34=\E[21;5~, kf35=\E[23;5~,
kf36=\E[24;5~, kf37=\E[1;6P, kf38=\E[1;6Q, kf39=\E[1;6R,
kf4=\EOS, kf40=\E[1;6S, kf41=\E[15;6~, kf42=\E[17;6~,
kf43=\E[18;6~, kf44=\E[19;6~, kf45=\E[20;6~,
kf46=\E[21;6~, kf47=\E[23;6~, kf48=\E[24;6~,
kf49=\E[1;3P, kf5=\E[15~, kf50=\E[1;3Q, kf51=\E[1;3R,
kf52=\E[1;3S, kf53=\E[15;3~, kf54=\E[17;3~,
kf55=\E[18;3~, kf56=\E[19;3~, kf57=\E[20;3~,
kf58=\E[21;3~, kf59=\E[23;3~, kf6=\E[17~, kf60=\E[24;3~,
kf61=\E[1;4P, kf62=\E[1;4Q, kf63=\E[1;4R, kf7=\E[18~,
kf8=\E[19~, kf9=\E[20~, khome=\EOH, kich1=\E[2~,
kind=\E[1;2B, kmous=\E[<, knp=\E[6~, kpp=\E[5~,
kri=\E[1;2A, oc=\E]104\007, op=\E[39;49m, rc=\E8,
rep=%p1%c\E[%p2%{1}%-%db, rev=\E[7m, ri=\EM,
rin=\E[%p1%dT, ritm=\E[23m, rmacs=\E(B, rmam=\E[?7l,
rmcup=\E[?1049l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
rmul=\E[24m, rs1=\E]\E\\\Ec, sc=\E7,
setab=\E[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m,
setaf=\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m,
sgr=%?%p9%t\E(0%e\E(B%;\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m,
sgr0=\E(B\E[m, sitm=\E[3m, smacs=\E(0, smam=\E[?7h,
smcup=\E[?1049h, smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m,
smul=\E[4m, tbc=\E[3g, tsl=\E]2;, u6=\E[%i%d;%dR, u7=\E[6n,
u8=\E[?%[;0123456789]c, u9=\E[c, vpa=\E[%i%p1%dd,
BD=\E[?2004l, BE=\E[?2004h, Clmg=\E[s,
Cmg=\E[%i%p1%d;%p2%ds, Dsmg=\E[?69l, E3=\E[3J,
Enmg=\E[?69h, Ms=\E]52;%p1%s;%p2%s\007, PE=\E[201~,
PS=\E[200~, RV=\E[>c, Se=\E[2 q,
Setulc=\E[58:2::%p1%{65536}%/%d:%p1%{256}%/%{255}%&%d:%p1%{255}%&%d%;m,
Smulx=\E[4:%p1%dm, Ss=\E[%p1%d q,
Sync=\E[?2026%?%p1%{1}%-%tl%eh%;,
XM=\E[?1006;1000%?%p1%{1}%=%th%el%;, XR=\E[>0q,
fd=\E[?1004l, fe=\E[?1004h, kDC3=\E[3;3~, kDC4=\E[3;4~,
kDC5=\E[3;5~, kDC6=\E[3;6~, kDC7=\E[3;7~, kDN=\E[1;2B,
kDN3=\E[1;3B, kDN4=\E[1;4B, kDN5=\E[1;5B, kDN6=\E[1;6B,
kDN7=\E[1;7B, kEND3=\E[1;3F, kEND4=\E[1;4F,
kEND5=\E[1;5F, kEND6=\E[1;6F, kEND7=\E[1;7F,
kHOM3=\E[1;3H, kHOM4=\E[1;4H, kHOM5=\E[1;5H,
kHOM6=\E[1;6H, kHOM7=\E[1;7H, kIC3=\E[2;3~, kIC4=\E[2;4~,
kIC5=\E[2;5~, kIC6=\E[2;6~, kIC7=\E[2;7~, kLFT3=\E[1;3D,
kLFT4=\E[1;4D, kLFT5=\E[1;5D, kLFT6=\E[1;6D,
kLFT7=\E[1;7D, kNXT3=\E[6;3~, kNXT4=\E[6;4~,
kNXT5=\E[6;5~, kNXT6=\E[6;6~, kNXT7=\E[6;7~,
kPRV3=\E[5;3~, kPRV4=\E[5;4~, kPRV5=\E[5;5~,
kPRV6=\E[5;6~, kPRV7=\E[5;7~, kRIT3=\E[1;3C,
kRIT4=\E[1;4C, kRIT5=\E[1;5C, kRIT6=\E[1;6C,
kRIT7=\E[1;7C, kUP=\E[1;2A, kUP3=\E[1;3A, kUP4=\E[1;4A,
kUP5=\E[1;5A, kUP6=\E[1;6A, kUP7=\E[1;7A, kxIN=\E[I,
kxOUT=\E[O, rmxx=\E[29m, rv=\E\\[[0-9]+;[0-9]+;[0-9]+c,
setrgbb=\E[48:2:%p1%d:%p2%d:%p3%dm,
setrgbf=\E[38:2:%p1%d:%p2%d:%p3%dm, smxx=\E[9m,
xm=\E[<%i%p3%d;%p1%d;%p2%d;%?%p4%tM%em%;,
xr=\EP>\\|[ -~]+a\E\\,

View File

@@ -0,0 +1,18 @@
Protocol 2
PermitRootLogin yes
MaxAuthTries 3
PubkeyAuthentication yes
PasswordAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
AllowAgentForwarding no
AllowTcpForwarding yes
X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server

View File

@@ -1,4 +1,3 @@
Include /etc/ssh/sshd_config.d/*.conf
Protocol 2
PermitRootLogin no
MaxAuthTries 3
@@ -13,6 +12,7 @@ X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxIbkko72kVSfYDjJpiMH9SjHUGqBn3MbBvmotsPQhybFgnnkBpX/3fM9olP+Z6PGsmbOEs0fOjPS6uY5hjKcKsyHdZfS6cA4wjY/DL8fwATAW5FCDBtMpdg2/sb8j9jutHHs4sQeRBolVwKcv+ZAaJNnOzNHwxVUfT9bNwShthnAFjkY7oZo657FRomlkDJjmGQuratP0veKA8jYzqqPWwWidTGQerLYTyJ3Z8pbQa5eN7svrvabjjDLbVTDESE8st9WEmwvAwoj7Kz+WovCy0Uz7LRFVmaRiapM8SXtPPUC0xfyzAB3NxwBtxizdUMlShvLcL6cujcUBMulVMpsqEaOESTpmVTrMJhnJPZG/3j9ziGoYIa6hMj1J9/qLQ5dDNVVXMxw99G31x0LJoy12IE90P4Cahux8iN0Cp4oB4+B6/qledxs1fcRzsnQY/ickjKhqcJwgHzsnwjDkeYRaYte5x4f/gJ77kA20nPto7mxr2mhWot/i9B1KlMURVXOH/q4nrzhJ0hPJpM0UtzQ58TmzE4Osf/B5yoe8V//6XnelbmG/nKCIzg12d7PvaLjbFMn8IgOwDMRlip+vpyadRr/+pCawrfo4vLF7BsnJ84aoByIpbwaysgaYHtjfZWImorMVkgviC4O6Hn9/ZiLNze2A9DaNUnLVJ0nYNbmv9Q==

View File

@@ -3,4 +3,4 @@
  service:
    name: sshd
    state: restarted
  become: yes
  become: true

View File

@@ -2,11 +2,23 @@
- name: Copy bash-configs
  ansible.builtin.template:
    src: "files/bash/{{ item }}"
    dest: "/home/{{ user }}/.{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    dest: "{{ ansible_env.HOME }}/.{{ item }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
  loop:
    - bashrc
    - bash_aliases
  become: true
- name: Copy ghostty infocmp
  ansible.builtin.copy:
    src: files/ghostty/infocmp
    dest: "{{ ansible_env.HOME }}/ghostty"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "0644"
  register: ghostty_terminfo
- name: Compile ghostty terminfo
  ansible.builtin.command: "tic -x {{ ansible_env.HOME }}/ghostty"
  when: ghostty_terminfo.changed

View File

@@ -11,7 +11,6 @@
    url: https://raw.githubusercontent.com/eza-community/eza/main/deb.asc
    dest: /etc/apt/keyrings/gierens.asc
    mode: "0644"
  register: gpg_key_result
  become: true
- name: Add Gierens repository to apt sources

View File

@@ -1,14 +1,14 @@
---
- name: Set a hostname
  ansible.builtin.hostname:
    name: "{{ host.hostname }}"
    name: "{{ inventory_hostname }}"
  become: true
- name: Update /etc/hosts to reflect the new hostname
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: '^127\.0\.1\.1'
    line: "127.0.1.1 {{ host.hostname }}"
    line: "127.0.1.1 {{ inventory_hostname }}"
    state: present
    backup: true
  become: true

View File

@@ -1,10 +1,10 @@
---
- name: Configure Time
  ansible.builtin.include_tasks: time.yml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yml
- name: Configure Packages
  ansible.builtin.include_tasks: packages.yml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yml
- name: Configure Extra-Packages
  ansible.builtin.include_tasks: extra_packages.yml
- name: Configure Bash

View File

@@ -5,9 +5,24 @@
    upgrade: true
    autoremove: true
  become: true
  when: ansible_user_id != "root"
- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  become: true
  when: ansible_user_id != "root"
- name: Update and upgrade packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: true
    autoremove: true
  when: ansible_user_id == "root"
- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  when: ansible_user_id == "root"

View File

@@ -1,17 +1,28 @@
---
- name: Copy sshd_config
- name: Copy user sshd_config
  ansible.builtin.template:
    src: templates/ssh/sshd_config
    src: files/ssh/user/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  become: true
  when: ansible_user_id != "root"
- name: Copy root sshd_config
  ansible.builtin.template:
    src: files/ssh/root/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  when: ansible_user_id == "root"
- name: Copy pubkey
  ansible.builtin.copy:
    content: "{{ pubkey }}"
    dest: "/home/{{ user }}/.ssh/authorized_keys"
    owner: "{{ user }}"
    group: "{{ user }}"
    src: files/ssh/vault-ca.pub
    dest: "/etc/ssh/vault-ca.pub"
    mode: "644"
  become: true

View File

@@ -1,4 +1,11 @@
---
- name: Set timezone to "{{ timezone }}"
- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  become: true
  when: ansible_user_id != "root"
- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  when: ansible_user_id == "root"

View File

@@ -0,0 +1,16 @@
common_packages:
- build-essential
- curl
- git
- iperf3
- neovim
- rsync
- smartmontools
- sudo
- systemd-timesyncd
- tree
- screen
- bat
- fd-find
- ripgrep
- nfs-common

View File

@@ -0,0 +1,85 @@
# Ansible Role: Docker Host
This role sets up a Docker host, installs Docker, and configures it according to the provided variables. It also handles user and group management, directory setup, and deployment of Docker Compose services.
## Role Variables
### General
- `docker_host_package_common_dependencies`: A list of common packages to be installed on the host.
- Default: `nfs-common`, `firmware-misc-nonfree`, `linux-image-amd64`
- `apt_lock_files`: A list of apt lock files to check.
- `arch`: The architecture of the host.
- Default: `arm64` if `ansible_architecture` is `aarch64`, otherwise `amd64`.
### Docker
- `docker.url`: The URL for the Docker repository.
- Default: `https://download.docker.com/linux`
- `docker.apt_release_channel`: The Docker apt release channel.
- Default: `stable`
- `docker.directories.local`: The local directory for Docker data.
- Default: `/opt/local`
- `docker.directories.config`: The directory for Docker configurations.
- Default: `/opt/config`
- `docker.directories.compose`: The directory for Docker Compose files.
- Default: `/opt/compose`
### Keycloak
- `keycloak_config`: A dictionary containing the Keycloak configuration. See `templates/keycloak/realm.json.j2` for more details.
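For illustration, a hypothetical `keycloak_config` might be shaped like this (keys inferred from `templates/keycloak/realm.json.j2` and `50_provision.yml`; all names and secrets are placeholders):
```yaml
keycloak_config:
  realms:
    - realm: homelab            # placeholder realm name
      display_name: "Homelab"
      users:
        - username: alice
          password: "{{ vault_alice_password }}"  # placeholder vault variable
          realm_roles: ["user"]
          client_roles:
            account: ["view-profile", "manage-account"]
      admin:
        username: admin
        password: "{{ vault_keycloak_admin_password }}"  # placeholder
        realm_roles: ["admin"]
        client_roles:
          realm_management: ["realm-admin"]
          account: ["manage-account"]
      roles:
        realm:
          - name: admin
          - name: user
        default_roles: ["user"]
```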
### Services
- `services`: A list of dictionaries, where each dictionary represents a Docker Compose service. See `templates/compose.yaml.j2` for more details.
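As a sketch, one entry in `services` might look like the following (field names taken from `templates/compose.yaml.j2`; the image, host names, and values are placeholders):
```yaml
services:
  - name: whoami                # compose key and default hostname in the template
    container_name: whoami
    image: traefik/whoami:v1.10 # placeholder image
    vm: ["docker-host00"]       # hosts on which this service is rendered
    ports:
      - name: http              # a port whose name contains "http" drives the healthcheck
        external: 8080
        internal: 80
    volumes:
      - external: /opt/local/whoami
        internal: /data
    environment:
      - TZ=Europe/Berlin
    healthcheck: curl           # optional; the template defaults to curl
    sub_service:                # optional companion containers
      - name: redis
        version: "7"
```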
## Tasks
The role performs the following tasks:
1. **Setup VM**:
- Includes `non-free` and `non-free-firmware` components in the apt sources.
- Installs common packages.
- Removes cloud kernel packages.
- Reboots the host.
2. **Install Docker**:
- Uninstalls old Docker versions.
- Installs dependencies for using repositories over HTTPS.
- Adds the Docker apt key and repository.
- Installs Docker Engine, containerd, and Docker Compose.
3. **Setup user and group for Docker**:
- Ensures the `docker` group exists.
- Adds the `ansible_user_id` to the `docker` group.
- Reboots the host.
4. **Setup directory structure for Docker**:
- Creates necessary directories for Docker and media.
- Sets ownership of the directories.
- Mounts NFS shares.
5. **Deploy configs**:
- Sets up Keycloak realms if the host is a Keycloak host.
6. **Deploy Docker Compose**:
- Copies the Docker Compose file to the target host.
7. **Publish metrics**:
- Copies the `daemon.json` file to `/etc/docker/daemon.json` to enable metrics.
## Handlers
- `Restart docker`: Restarts the Docker service.
- `Restart compose`: Restarts the Docker Compose services.
- `Restart host`: Reboots the host.
## Usage
To use this role, include it in your playbook and set the required variables.
```yaml
- hosts: docker_hosts
  roles:
    - role: docker_host
  vars:
    # Your variables here
```
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details.

View File

@@ -8,4 +8,14 @@
- name: Restart compose
  community.docker.docker_compose_v2:
    project_src: "{{ docker.directories.compose }}"
    state: restarted
    state: present
  retries: 3
  delay: 5
  become: true
- name: Restart host
  ansible.builtin.reboot:
    connect_timeout: 5
    reboot_timeout: 600
    test_command: whoami
  become: true

View File

@@ -0,0 +1,50 @@
---
- name: Check if debian.sources file exists
  ansible.builtin.stat:
    path: /etc/apt/sources.list.d/debian.sources
  register: debian_sources_stat
- name: Replace Components line to include non-free and non-free-firmware
  ansible.builtin.replace:
    path: /etc/apt/sources.list.d/debian.sources
    regexp: "^Components:.*$"
    replace: "Components: main non-free non-free-firmware"
  when: debian_sources_stat.stat.exists
  become: true
- name: Setup VM Packages
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
    update_cache: true
  loop: "{{ docker_host_package_common_dependencies }}"
  become: true
- name: Gather installed package facts
  ansible.builtin.package_facts:
    manager: auto
- name: Filter for specific cloud kernel packages
  ansible.builtin.set_fact:
    cloud_kernel_packages: >-
      {{
        ansible_facts.packages.keys()
        | select('search', 'linux-image')
        | select('search', 'cloud')
        | list
      }}
- name: Use the list to remove the found packages
  ansible.builtin.apt:
    name: "{{ cloud_kernel_packages }}"
    state: absent
    autoremove: true
  when: cloud_kernel_packages | length > 0
  become: true
- name: Restart host
  ansible.builtin.reboot:
    connect_timeout: 5
    reboot_timeout: 600
    test_command: whoami
  become: true

View File

@@ -26,6 +26,7 @@
      - curl
      - gnupg
      - lsb-release
      - qemu-guest-agent
  become: true
- name: Add Docker apt key.

View File

@@ -5,10 +5,12 @@
    state: present
  become: true
- name: Append the group docker to "{{ user }}"
- name: Append the group docker to "{{ ansible_user_id }}"
  ansible.builtin.user:
    name: "{{ user }}"
    name: "{{ ansible_user_id }}"
    shell: /bin/bash
    groups: docker
    append: true
  become: true
  notify:
    - Restart host

View File

@@ -9,19 +9,20 @@
    - /media/series
    - /media/movies
    - /media/songs
    - "{{ docker.directories.opt }}"
    - "{{ docker.directories.local }}"
    - "{{ docker.directories.config }}"
    - "{{ docker.directories.compose }}"
    - /opt/local
  become: true
- name: Set ownership to {{ user }}
- name: Set ownership to {{ ansible_user_id }}
  ansible.builtin.file:
    path: "{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
  loop:
    - "{{ docker.directories.opt }}"
    - /opt/local
    - "{{ docker.directories.local }}"
    - "{{ docker.directories.config }}"
    - "{{ docker.directories.compose }}"
    - /media
  become: true

View File

@@ -0,0 +1,31 @@
---
- name: Set fact if this host should run Keycloak
  ansible.builtin.set_fact:
    is_keycloak_host: "{{ inventory_hostname in (services | selectattr('name', 'equalto', 'keycloak') | map(attribute='vm') | first) }}"
- name: Create Keycloak directories
  ansible.builtin.file:
    path: "{{ docker.directories.local }}/keycloak/"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    state: directory
    mode: "0755"
  when: is_keycloak_host | bool
  become: true
- name: Setup Keycloak realms
  ansible.builtin.template:
    src: "templates/keycloak/realm.json.j2"
    dest: "{{ docker.directories.local }}/keycloak/{{ keycloak.realm }}-realm.json"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
    backup: true
  when: is_keycloak_host | bool
  loop: "{{ keycloak_config.realms }}"
  loop_control:
    loop_var: keycloak
  notify:
    - Restart docker
    - Restart compose
  become: true

View File

@@ -3,8 +3,8 @@
  ansible.builtin.template:
    src: "templates/compose.yaml.j2"
    dest: "{{ docker.directories.compose }}/compose.yaml"
    owner: "{{ user }}"
    group: "{{ user }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
    backup: true
  notify:

View File

@@ -1,18 +1,21 @@
---
- name: Setup VM
  ansible.builtin.include_tasks: setup.yml
  ansible.builtin.include_tasks: 10_setup.yml
- name: Install docker
  ansible.builtin.include_tasks: installation.yml
  ansible.builtin.include_tasks: 20_installation.yml
- name: Setup user and group for docker
  ansible.builtin.include_tasks: user_group_setup.yml
  ansible.builtin.include_tasks: 30_user_group_setup.yml
- name: Setup directory structure for docker
  ansible.builtin.include_tasks: directory_setup.yml
  ansible.builtin.include_tasks: 40_directory_setup.yml
- name: Deploy configs
  ansible.builtin.include_tasks: 50_provision.yml
- name: Deploy docker compose
  ansible.builtin.include_tasks: deploy_compose.yml
  ansible.builtin.include_tasks: 60_deploy_compose.yml
- name: Publish metrics
  ansible.builtin.include_tasks: export.yml
  ansible.builtin.include_tasks: 70_export.yml

View File

@@ -1,9 +0,0 @@
---
- name: Enable HW accelerate for VM
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - firmware-misc-nonfree
    - nfs-common
  become: true

View File

@@ -1,12 +1,13 @@
services:
{% for service in services %}
{% if inventory_hostname in service.vm %}
  {{service.name}}:
  {{ service.name }}:
    container_name: {{ service.container_name }}
    image: {{ service.image }}
    restart: {{ service.restart }}
    restart: unless-stopped
{% if service.network_mode is not defined %}
    hostname: {{service.name}}
    hostname: {{ service.name }}
    networks:
      - net
{% endif %}
@@ -15,11 +16,40 @@ services:
    ports:
{% for port in service.ports %}
{% if port.internal != 'proxy_only' %}
      - {{port.external}}:{{port.internal}}
      - {{ port.external }}:{{ port.internal }}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% if service.ports is defined and service.ports is iterable %}
{% set first_http_port = service.ports | default([]) | selectattr('name', 'defined') | selectattr('name', 'search', 'http') | first %}
{% set chosen_http_port_value = none %}
{% if first_http_port is not none %}
{% if first_http_port.internal is defined and first_http_port.internal == 'proxy_only' %}
{% if first_http_port.external is defined %}
{% set chosen_http_port_value = first_http_port.external %}
{% endif %}
{% else %}
{% set chosen_http_port_value = first_http_port.internal %}
{% endif %}
{% if chosen_http_port_value is defined %}
    healthcheck:
{% set healthcheck = 'curl' %}
{% if service.healthcheck is defined %}
{% set healthcheck = service.healthcheck %}
{% endif %}
{% if healthcheck == 'curl' %}
      test: ["CMD", "curl", "-f", "--silent", "--show-error", "--connect-timeout", "5", "http://localhost:{{ chosen_http_port_value }}/"]
{% elif healthcheck == 'wget' %}
      test: ["CMD-SHELL", "wget --quiet --spider --timeout=5 http://localhost:{{ chosen_http_port_value }}/ || exit 1"]
{% endif %}
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 20s
{% endif %}
{% endif %}
{% endif %}
{% if service.cap_add is defined and service.cap_add is iterable %}
cap_add:
{% for cap in service.cap_add %}
@@ -41,46 +71,88 @@ services:
{% if service.volumes is defined and service.volumes is iterable %}
    volumes:
{% for volume in service.volumes %}
      - {{volume.external}}:{{volume.internal}}
      - {{ volume.external }}:{{ volume.internal }}
{% endfor %}
{% endif %}
{% if service.environment is defined and service.environment is iterable %}
    environment:
{% for env in service.environment %}
      - {{env}}
      - {{ env }}
{% endfor %}
{% endif %}
{% if service.devices is defined and service.devices is iterable %}
    devices:
{% for device in service.devices %}
      - {{device.external}}:{{device.internal}}
      - {{ device.external }}:{{ device.internal }}
{% endfor %}
{% endif %}
{% if service.name == 'paperless' %}
  {{service.name}}-broker:
    container_name: paperless-broker
    image: docker.io/library/redis:7
    restart: unless-stopped
    networks:
      - net
    volumes:
      - /opt/local/paperless/redis/data:/data
  {{service.name}}-postgres:
    container_name: paperless-postgres
    image: docker.io/library/postgres:15
    restart: unless-stopped
    networks:
      - net
    volumes:
      - /opt/local/paperless/db/data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: 5fnhn%u2YWY3paNvMAjdoufYPQ2Hf3Yi
{% if service.command is defined and service.command is iterable %}
    command:
{% for command in service.command %}
      - {{ command }}
{% endfor %}
{% endif %}
{% if service.sub_service is defined and service.sub_service is iterable %}
{% for sub in service.sub_service %}
{% if sub.name is defined and sub.name == "postgres" %}
  {{ service.name }}-postgres:
    container_name: {{ service.name }}-postgres
    image: docker.io/library/postgres:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-postgres
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/postgres/data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: {{ service.name }}
      POSTGRES_USER: {{ sub.username }}
      POSTGRES_PASSWORD: {{ sub.password }}
{% endif %}
{% if sub.name is defined and sub.name == "redis" %}
  {{ service.name }}-redis:
    container_name: {{ service.name }}-redis
    image: docker.io/library/redis:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-redis
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/redis/data:/data
{% endif %}
{% if sub.name is defined and sub.name == "chrome" %}
  {{ service.name }}-chrome:
    image: gcr.io/zenika-hub/alpine-chrome:{{ sub.version }}
    container_name: {{ service.name }}-chrome
    restart: unless-stopped
    networks:
      - net
    command:
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
{% endif %}
{% if sub.name is defined and sub.name == "meilisearch" %}
  {{ service.name }}-meilisearch:
    container_name: {{ service.name }}-meilisearch
    image: getmeili/meilisearch:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-meilisearch
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/mailisearch/data:/meili_data
    environment:
      - MEILI_NO_ANALYTICS=true
      - NEXTAUTH_SECRET={{ sub.nextauth_secret }}
      - MEILI_MASTER_KEY={{ sub.meili_master_key }}
      - OPENAI_API_KEY="{{ sub.openai_key }}"
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}
networks:
@@ -90,6 +162,3 @@ networks:
      driver: default
      config:
        - subnet: 172.16.69.0/24
volumes:
  prometheus_data: {}
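For reference, given a service whose `ports` list contains an entry named `http` with `internal: 3000`, the healthcheck portion of the template above renders to roughly the following (a sketch; the port value and defaults follow the template's own logic):
```yaml
healthcheck:
  test: ["CMD", "curl", "-f", "--silent", "--show-error", "--connect-timeout", "5", "http://localhost:3000/"]
  interval: 30s
  timeout: 10s
  retries: 5
  start_period: 20s
```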

View File

@@ -0,0 +1,79 @@
{
"realm": "{{ keycloak.realm }}",
"enabled": true,
"displayName": "{{ keycloak.display_name }}",
"displayNameHtml": "<div class=\"kc-logo-text\">{{keycloak.display_name}}</div>",
"bruteForceProtected": true,
"users": [
{% if keycloak.users is defined and keycloak.users is iterable %}
{% for user in keycloak.users %}
{
"username": "{{ user.username }}",
"enabled": true,
"credentials": [
{
"type": "password",
"value": "{{ user.password }}",
"temporary": false
}
],
"realmRoles": [
{% for realm_role in user.realm_roles %}
"{{ realm_role }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
],
"clientRoles": {
"account": [
{% for account in user.client_roles.account %}
"{{ account }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
]
}
},{% if not loop.last %}{% endif %}
{% endfor %}
{% endif %}
{
"username": "{{ keycloak.admin.username }}",
"enabled": true,
"credentials": [
{
"type": "password",
"value": "{{ keycloak.admin.password }}",
"temporary": false
}
],
"realmRoles": [
{% for realm_role in keycloak.admin.realm_roles %}
"{{ realm_role }}"{% if not loop.last %},{% endif %}{{''}}
{% endfor %}
],
"clientRoles": {
"realm-management": [
{% for realm_management in keycloak.admin.client_roles.realm_management %}
"{{ realm_management }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
],
"account": [
{% for account in keycloak.admin.client_roles.account %}
"{{ account }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
]
}
}
],
"roles": {
"realm": [
{% for role in keycloak.roles.realm %}
{
"name": "{{ role.name }}",
"description": "{{ role.name }}"
}{% if not loop.last %},{% endif %}
{% endfor %}
]
},
"defaultRoles": [
{% for role in keycloak.roles.default_roles %}
"{{ role }}"{% if not loop.last %},{% endif %}{{''}}
{% endfor %}
]
}

View File

@@ -0,0 +1,9 @@
docker_host_package_common_dependencies:
- nfs-common
- firmware-misc-nonfree
- linux-image-amd64
apt_lock_files:
- /var/lib/dpkg/lock
- /var/lib/dpkg/lock-frontend
- /var/cache/apt/archives/lock

39
roles/k3s_agent/README.md Normal file
View File

@@ -0,0 +1,39 @@
# K3s Agent Ansible Role
This Ansible role installs and configures a K3s agent on a node.
## Role Variables
- `k3s.loadbalancer.default_port`: The port for the K3s load balancer. Defaults to `6443`.
- `k3s_token`: The token for joining the K3s cluster. This is a required variable.
- `hostvars['k3s-loadbalancer'].ansible_default_ipv4.address`: The IP address of the K3s load balancer, read from the gathered facts of the `k3s-loadbalancer` host; facts must be available for that host.
## Tasks
The main tasks are in `tasks/main.yml` and `tasks/installation.yml`.
- **`installation.yml`**:
- Installs `qemu-guest-agent`.
- Checks if K3s is already installed.
- Downloads the K3s installation script to `/tmp/k3s_install.sh`.
- Installs K3s as an agent, connecting to the master.
## Handlers
The main handlers are in `handlers/main.yml`.
- **`Restart k3s`**: Restarts the `k3s` service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
---
- hosts: k3s_agents
  roles:
    - role: k3s_agent
  vars:
    k3s_token: "your_k3s_token"
    k3s:
      loadbalancer:
        default_port: 6443
```

View File

@@ -3,4 +3,4 @@
  service:
    name: k3s
    state: restarted
  become: yes
  become: true

View File

@@ -1,4 +1,12 @@
---
- name: Install dependencies for apt to use repositories over HTTPS
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - qemu-guest-agent
  become: true
- name: See if k3s file exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s
@@ -11,11 +19,11 @@
    dest: /tmp/k3s_install.sh
    mode: "0755"
- name: Install K3s on the secondary servers
- name: Install K3s on agent
  when: not k3s_status.stat.exists
  ansible.builtin.command: |
    /tmp/k3s_install.sh
  environment:
    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
    K3S_TOKEN: "{{ k3s_token }}"
  become: true

View File

@@ -1,2 +1,3 @@
---
- include_tasks: installation.yml
- name: Install k3s agent
  include_tasks: installation.yml

View File

@@ -0,0 +1,50 @@
# K3s Loadbalancer Ansible Role
This Ansible role configures a load balancer for a K3s cluster using Nginx.
## Role Variables
- `k3s_loadbalancer_nginx_config_path`: The path to the Nginx configuration file. Defaults to `/etc/nginx/nginx.conf`.
- `domain`: The domain name to use for the load balancer. Defaults to `{{ internal_domain }}`.
- `k3s.loadbalancer.default_port`: The default port for the K3s API server. Defaults to `6443`.
- `k3s_server_ips`: A list of IP addresses for the K3s server nodes. This variable is not defined in the role, so you must provide it.
- `netcup_api_key`: Your Netcup API key.
- `netcup_api_password`: Your Netcup API password.
- `netcup_customer_id`: Your Netcup customer ID.
## Tasks
The role performs the following tasks:
- **Installation:**
- Updates the `apt` cache.
- Installs `qemu-guest-agent`.
- Installs `nginx-full`.
- **Configuration:**
- Templates the Nginx configuration file with dynamic upstreams for the K3s servers.
- Enables and starts the Nginx service.
- **DNS Setup:**
- Sets up a DNS A record for the load balancer using the `community.general.netcup_dns` module.
## Handlers
- `Restart nginx`: Restarts the Nginx service when the configuration file is changed.
## Example Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_loadbalancer
  roles:
    - role: k3s_loadbalancer
      vars:
        k3s_server_ips:
          - 192.168.1.10
          - 192.168.1.11
          - 192.168.1.12
        netcup_api_key: "your_api_key"
        netcup_api_password: "your_api_password"
        netcup_customer_id: "your_customer_id"
        internal_domain: "example.com"
```
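Rather than hardcoding `k3s_server_ips`, the list could be derived from an inventory group; a sketch, assuming a `k3s_servers` group with facts gathered:

```yaml
# Hypothetical group_vars entry: collect server IPs from the k3s_servers group.
k3s_server_ips: "{{ groups['k3s_servers']
  | map('extract', hostvars, ['ansible_default_ipv4', 'address'])
  | list }}"
```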

View File

@@ -2,15 +2,13 @@
- name: Template the nginx config file with dynamic upstreams
ansible.builtin.template:
src: templates/nginx.conf.j2
dest: "{{ nginx_config_path }}"
dest: "{{ k3s_loadbalancer_nginx_config_path }}"
owner: root
group: root
mode: "0644"
become: true
notify:
- Restart nginx
vars:
k3s_server_ips: "{{ k3s.server.ips }}"
- name: Enable nginx
ansible.builtin.systemd:

View File

@@ -4,6 +4,14 @@
update_cache: true
become: true
- name: Install qemu-guest-agent
ansible.builtin.apt:
name: "{{ item }}"
state: present
loop:
- qemu-guest-agent
become: true
- name: Install Nginx
ansible.builtin.apt:
name:

View File

@@ -0,0 +1,17 @@
---
- name: Installation
ansible.builtin.include_tasks: installation.yml
- name: Configure
ansible.builtin.include_tasks: configuration.yml
- name: Setup DNS on Netcup
community.general.netcup_dns:
api_key: "{{ netcup_api_key }}"
api_password: "{{ netcup_api_password }}"
customer_id: "{{ netcup_customer_id }}"
domain: "{{ domain }}"
name: "k3s"
type: "A"
value: "{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}"
delegate_to: localhost
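The task above creates an A record `k3s.{{ domain }}` pointing at the load balancer. A quick way to verify resolution from the control node, sketched with the `community.general.dig` lookup (an assumption, not part of the role; requires dnspython):

```yaml
# Hypothetical verification step: resolve the freshly created record locally.
- name: Verify the k3s A record resolves
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'k3s.' ~ domain) }}"
  delegate_to: localhost
  run_once: true
```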

View File

@@ -0,0 +1,98 @@
include /etc/nginx/modules-enabled/*.conf;
events {}
stream {
upstream k3s_servers {
{% for ip in k3s_server_ips %}
server {{ ip }}:{{ k3s.loadbalancer.default_port }};
{% endfor %}
}
server {
listen {{ k3s.loadbalancer.default_port }};
proxy_pass k3s_servers;
}
upstream etcd_servers {
{% for ip in k3s_server_ips %}
server {{ ip }}:2379;
{% endfor %}
}
server {
listen 2379;
proxy_pass etcd_servers;
}
upstream dns_servers {
{% for ip in k3s_server_ips %}
server {{ ip }}:53;
{% endfor %}
}
server {
listen 53 udp;
proxy_pass dns_servers;
}
}
# http {
# upstream k3s_servers_http {
# least_conn;
# {% for ip in k3s_server_ips %}
# server {{ ip }}:80;
# {% endfor %}
# }
#
# upstream k3s_servers_https {
# least_conn;
# {% for ip in k3s_server_ips %}
# server {{ ip }}:443;
# {% endfor %}
# }
#
# server {
# listen 80;
#
# location / {
# proxy_pass http://k3s_servers_http;
# proxy_set_header Host $http_host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto http;
# }
# }
#
# server {
# listen 443 ssl;
#
# server_name staging.k3s.seyshiro.de *.staging.k3s.seyshiro.de;
#
# ssl_certificate /etc/nginx/ssl/staging_tls.crt;
# ssl_certificate_key /etc/nginx/ssl/staging_tls.key;
#
# location / {
# proxy_pass https://k3s_servers_https;
# proxy_set_header Host $host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto https;
# }
# }
#
# server {
# listen 443 ssl;
#
# server_name k3s.seyshiro.de *.k3s.seyshiro.de;
#
# ssl_certificate /etc/nginx/ssl/production_tls.crt;
# ssl_certificate_key /etc/nginx/ssl/production_tls.key;
#
# location / {
# proxy_pass https://k3s_servers_https;
# proxy_set_header Host $host;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto https;
# }
# }
# }

View File

@@ -0,0 +1,3 @@
k3s_loadbalancer_nginx_config_path: "/etc/nginx/nginx.conf"
domain: "{{ internal_domain }}"

View File

@@ -0,0 +1,49 @@
# K3s Server Ansible Role
This Ansible role installs and configures a K3s server cluster.
## Role Variables
- `k3s_primary_server_ip`: The IP address of the primary K3s server.
- `k3s_server_name`: The server name for the K3s cluster.
- `k3s_cluster_name`: The name for the K3s cluster in the kubeconfig.
- `k3s_user_name`: The user name for the K3s cluster in the kubeconfig.
- `k3s_context_name`: The context name for the K3s cluster in the kubeconfig.
- `k3s_server_token_vault_file`: The path to the Ansible Vault file containing the K3s token. Default is `../vars/group_vars/k3s/secrets_token.yml`.
## Tasks
The main tasks are:
1. **Install dependencies**: Installs `qemu-guest-agent`.
2. **Primary Server Installation**:
- Downloads the K3s installation script.
- Installs the K3s server on the primary node with a TLS SAN.
3. **Pull Token**:
- Retrieves the K3s token from the primary server.
- Stores the token in an Ansible Vault encrypted file.
4. **Secondary Server Installation**:
- Installs K3s on the secondary servers, joining them to the cluster using the token from the vault.
5. **Create Kubeconfig**:
- Slurps the `k3s.yaml` from the primary server.
- Creates a kubeconfig file on the local machine for accessing the cluster.
## Handlers
- `Restart k3s`: Restarts the K3s service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_servers
  roles:
    - role: k3s_server
      vars:
        k3s_primary_server_ip: "192.168.1.100"
        k3s_server_name: "k3s.example.com"
        k3s_cluster_name: "my-k3s-cluster"
        k3s_user_name: "my-k3s-user"
        k3s_context_name: "my-k3s-context"
```

View File

@@ -3,4 +3,4 @@
service:
name: k3s
state: restarted
become: yes
become: true

View File

@@ -0,0 +1,87 @@
---
- name: Slurp original k3s.yaml from primary K3s server
ansible.builtin.slurp:
src: /etc/rancher/k3s/k3s.yaml
register: original_k3s_kubeconfig_slurp
become: true
- name: Parse original k3s.yaml content to extract cert data
ansible.builtin.set_fact:
original_parsed_k3s_kubeconfig: "{{ original_k3s_kubeconfig_slurp.content | b64decode | from_yaml }}"
delegate_to: localhost
run_once: true
- name: Set facts for certificate and key data needed by the template
ansible.builtin.set_fact:
k3s_server_ca_data: "{{ original_parsed_k3s_kubeconfig.clusters[0].cluster['certificate-authority-data'] }}"
k3s_client_cert_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-certificate-data'] }}"
k3s_client_key_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-key-data'] }}"
delegate_to: localhost
run_once: true
- name: Decode and save K3s Server CA certificate
ansible.builtin.copy:
content: "{{ k3s_server_ca_data | b64decode }}"
dest: "/tmp/k3s-ca.crt"
mode: "0644"
delegate_to: localhost
become: false
- name: Decode and save K3s Client certificate
ansible.builtin.copy:
content: "{{ k3s_client_cert_data | b64decode }}"
dest: "/tmp/k3s-client.crt"
mode: "0644"
delegate_to: localhost
become: false
- name: Decode and save K3s Client key
ansible.builtin.copy:
content: "{{ k3s_client_key_data | b64decode }}"
dest: "/tmp/k3s-client.key"
mode: "0600"
delegate_to: localhost
become: false
- name: Add K3s cluster to kubeconfig
ansible.builtin.command: >
kubectl config set-cluster "{{ k3s_cluster_name }}"
--server="https://{{ k3s_server_name }}:6443"
--certificate-authority=/tmp/k3s-ca.crt
--embed-certs=true
environment:
KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
delegate_to: localhost
become: false
- name: Add K3s user credentials to kubeconfig
ansible.builtin.command: >
kubectl config set-credentials "{{ k3s_user_name }}"
--client-certificate=/tmp/k3s-client.crt
--client-key=/tmp/k3s-client.key
--embed-certs=true
environment:
KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
delegate_to: localhost
become: false
- name: Add K3s context to kubeconfig
ansible.builtin.command: >
kubectl config set-context "{{ k3s_context_name }}"
--cluster="{{ k3s_cluster_name }}"
--user="{{ k3s_user_name }}"
environment:
KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
delegate_to: localhost
become: false
- name: Clean up temporary certificate and key files
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s-ca.crt"
- "/tmp/k3s-client.crt"
- "/tmp/k3s-client.key"
delegate_to: localhost
become: false
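After these tasks the cluster, user, and context exist in `~/.kube/config`, but the context is not activated. A possible final step (not in this diff):

```yaml
# Hypothetical follow-up: make the new context the active one.
- name: Switch kubectl to the new K3s context
  ansible.builtin.command: kubectl config use-context "{{ k3s_context_name }}"
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false
```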

View File

@@ -1,58 +0,0 @@
---
- name: See if k3s file exists
ansible.builtin.stat:
path: /usr/local/bin/k3s
register: k3s_status
- name: Download K3s install script to /tmp/
when: not k3s_status.stat.exists
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s_install.sh
mode: "0755"
- name: Install K3s server with node taint and TLS SAN
when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
ansible.builtin.command: |
/tmp/k3s_install.sh server \
--node-taint CriticalAddonsOnly=true:NoExecute \
--tls-san {{ k3s.loadbalancer.ip }}
environment:
K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
become: true
async: 300
poll: 0
register: k3s_primary_install
- name: Wait for K3s to be installed
when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
ansible.builtin.async_status:
jid: "{{ k3s_primary_install.ansible_job_id }}"
register: k3s_primary_install_status
until: k3s_primary_install_status.finished
retries: 60
delay: 5
become: true
- name: Get K3s token from the first server
when: host.ip == k3s.server.ips[0]
ansible.builtin.slurp:
src: /var/lib/rancher/k3s/server/node-token
register: k3s_token
become: true
- name: Set fact on k3s.server.ips[0]
when: host.ip == k3s.server.ips[0]
ansible.builtin.set_fact:
k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
- name: Install K3s on the secondary servers
when: (host.ip != k3s.server.ips[0] and (not k3s_status.stat.exists))
ansible.builtin.command: |
/tmp/k3s_install.sh server \
--node-taint CriticalAddonsOnly=true:NoExecute \
--tls-san {{ k3s.loadbalancer.ip }}
environment:
K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
K3S_TOKEN: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
become: true

View File

@@ -1,2 +1,29 @@
---
- include_tasks: installation.yml
- name: Install qemu-guest-agent
ansible.builtin.apt:
name: "{{ item }}"
state: present
update_cache: true
loop:
- qemu-guest-agent
become: true
- name: See if k3s file exists
ansible.builtin.stat:
path: /usr/local/bin/k3s
register: k3s_status
- name: Install primary k3s server
include_tasks: primary_installation.yml
when: ansible_default_ipv4.address == k3s_primary_server_ip
- name: Get token from primary k3s server
include_tasks: pull_token.yml
- name: Install secondary k3s servers
include_tasks: secondary_installation.yml
when: ansible_default_ipv4.address != k3s_primary_server_ip
- name: Set kubeconfig on localhost
include_tasks: create_kubeconfig.yml
when: ansible_default_ipv4.address == k3s_primary_server_ip

View File

@@ -0,0 +1,14 @@
---
- name: Download K3s install script to /tmp/
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s_install.sh
mode: "0755"
- name: Install K3s server with cluster-init and TLS SAN
ansible.builtin.command: |
/tmp/k3s_install.sh server \
--cluster-init \
--tls-san {{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }} \
--tls-san {{ k3s_server_name }}
become: true
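Because the installer can return before the API server is fully up, the subsequent token pull may race the server start. A readiness gate like the following could be appended (a sketch; `k3s kubectl` ships with the server install):

```yaml
# Hypothetical readiness gate before pulling the join token.
- name: Wait for the primary node to report Ready
  ansible.builtin.command: k3s kubectl wait --for=condition=Ready node --all --timeout=300s
  become: true
  changed_when: false
```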

View File

@@ -0,0 +1,26 @@
- name: Get K3s token from the first server
when: ansible_default_ipv4.address == k3s_primary_server_ip
ansible.builtin.slurp:
src: /var/lib/rancher/k3s/server/node-token
register: k3s_token
become: true
- name: Set k3s_token fact on the primary server
ansible.builtin.set_fact:
k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
when:
- ansible_default_ipv4.address == k3s_primary_server_ip
- name: Write K3s token to local file for encryption
ansible.builtin.copy:
content: |
k3s_token: "{{ k3s_token }}"
dest: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
mode: "0600"
delegate_to: localhost
run_once: true
- name: Encrypt k3s token
ansible.builtin.shell: cd ../; ansible-vault encrypt "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
delegate_to: localhost
run_once: true
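`ansible-vault encrypt` fails if the file is already encrypted, so a second run of this role would error out here. One way to make the step idempotent, sketched under the assumption that vaulted files start with the `$ANSIBLE_VAULT` header:

```yaml
# Hypothetical idempotency guard around the encryption step.
- name: Check whether the token file is already vaulted
  ansible.builtin.command: head -n1 "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
  register: token_head
  changed_when: false
  delegate_to: localhost
  run_once: true
- name: Encrypt k3s token only if it is still plaintext
  ansible.builtin.command: ansible-vault encrypt "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
  when: "'$ANSIBLE_VAULT' not in token_head.stdout"
  delegate_to: localhost
  run_once: true
```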

View File

@@ -0,0 +1,21 @@
---
- name: Add token vault
ansible.builtin.include_vars:
file: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
name: k3s_token_vault
- name: Download K3s install script to /tmp/
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s_install.sh
mode: "0755"
- name: Install K3s on the secondary servers
ansible.builtin.command: |
/tmp/k3s_install.sh \
--server "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}" \
--tls-san {{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }} \
--tls-san {{ k3s_server_name }}
environment:
K3S_TOKEN: "{{ k3s_token_vault.k3s_token }}"
become: true

View File

@@ -0,0 +1 @@
k3s_server_token_vault_file: ../vars/group_vars/k3s/secrets_token.yml

View File

@@ -0,0 +1,39 @@
# k3s_storage Ansible Role
This role installs and configures a k3s node with storage-specific taints and labels.
## Role Variables
- `k3s.loadbalancer.default_port`: The port of the k3s loadbalancer. Defaults to `6443`.
- `k3s_token`: The token to join the k3s cluster. This is a required variable.
- `hostvars['k3s-loadbalancer'].ansible_default_ipv4.address`: The IP address of the k3s loadbalancer. This is discovered automatically from the host with the name `k3s-loadbalancer`.
## Tasks
The main task includes the following files:
- `requirements.yml`:
- Updates and upgrades system packages.
- Installs `open-iscsi` and `nfs-common`.
- Starts and enables the `open-iscsi` service.
- `installation.yml`:
- Downloads the k3s installation script.
- Installs k3s on the node with the following configurations:
- Taint: `storage=true:NoExecute`
- Label: `longhorn=true`
## Handlers
- `Restart k3s`: Restarts the k3s service.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: k3s_storage_nodes
  roles:
    - role: k3s_storage
      vars:
        k3s_token: "your_k3s_token"
```
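The `storage=true:NoExecute` taint evicts every workload that does not tolerate it, so storage pods must carry a matching toleration and target the `longhorn=true` label. A sketch of the corresponding pod spec fragment (illustrative, not part of the role):

```yaml
# Hypothetical pod spec fragment for workloads meant to run on storage nodes.
tolerations:
  - key: storage
    operator: Equal
    value: "true"
    effect: NoExecute
nodeSelector:
  longhorn: "true"
```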

View File

@@ -3,4 +3,4 @@
service:
name: k3s
state: restarted
become: yes
become: true

View File

@@ -18,6 +18,6 @@
--node-taint storage=true:NoExecute \
--node-label longhorn=true
environment:
K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
K3S_TOKEN: "{{ k3s_token }}"
become: true

View File

@@ -0,0 +1,44 @@
# Ansible Role: ArgoCD
This role installs and configures ArgoCD in a Kubernetes cluster.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
| Variable | Default | Description |
|---|---|---|
| `argocd_version` | `stable` | The version of ArgoCD to install. |
| `argocd_namespace` | `argocd` | The namespace where ArgoCD will be installed. |
| `argocd_repo` | `https://raw.githubusercontent.com/argoproj/argo-cd/refs/tags/{{ argocd_version }}/manifests/ha/install.yaml` | The URL to the ArgoCD installation manifests. |
| `argocd_git_repository` | `https://github.com/argocd/argocd` | The Git repository URL for ArgoCD to use. |
| `argocd_git_username` | `user` | The username for the Git repository. |
| `argocd_git_pat` | `token` | The personal access token for the Git repository. |
## Tasks
The following tasks are performed by this role:
- **Install ArgoCD**: Creates the ArgoCD namespace and applies the installation manifests.
- **Apply ArgoCD Ingress**: Applies an Ingress resource for the ArgoCD server. **Note:** The template file `ingress.yml.j2` is missing from the role; a sketch of a possible template follows the tasks file below.
- **Apply ArgoCD CM**: Applies a ConfigMap with command parameters for ArgoCD.
- **Apply ArgoCD repository**: Creates a Secret with Git repository credentials.
- **Apply ArgoCD Root Application**: Creates a root Application resource for ArgoCD.
## Handlers
There are no handlers in this role.
## Usage
Here is an example of how to use this role in a playbook:
```yaml
- hosts: kubernetes
  roles:
    - role: kubernetes_argocd
      vars:
        argocd_git_repository: "https://github.com/my-org/my-repo.git"
        argocd_git_username: "my-user"
        argocd_git_pat: "my-token"
```

View File

@@ -0,0 +1,8 @@
---
argocd_version: stable
argocd_namespace: argocd
argocd_repo: "https://raw.githubusercontent.com/argoproj/argo-cd/refs/tags/{{ argocd_version }}/manifests/ha/install.yaml"
argocd_git_repository: https://github.com/argocd/argocd
argocd_git_username: "user"
argocd_git_pat: "token"

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cmd-params-cm
labels:
app.kubernetes.io/name: argocd-cmd-params-cm
app.kubernetes.io/part-of: argocd
data:
redis.server: "argocd-redis-ha-haproxy:6379"
server.insecure: "true"

View File

@@ -0,0 +1,72 @@
---
- name: Install ArgoCD
block:
- name: Create ArgoCD namespace
kubernetes.core.k8s:
name: "{{ argocd_namespace }}"
api_version: v1
kind: Namespace
state: present
- name: Apply ArgoCD manifests
kubernetes.core.k8s:
src: "{{ argocd_repo }}"
state: present
namespace: "{{ argocd_namespace }}"
register: apply_manifests
until: apply_manifests is not failed
retries: 5
delay: 10
- name: Wait for ArgoCD server to be ready
kubernetes.core.k8s_info:
api_version: apps/v1
kind: Deployment
name: argocd-server
namespace: "{{ argocd_namespace }}"
register: rollout_status
until: >
rollout_status.resources[0].status.readyReplicas is defined and
rollout_status.resources[0].status.readyReplicas == rollout_status.resources[0].spec.replicas
retries: 30
delay: 10
- name: Apply ArgoCD Ingress
kubernetes.core.k8s:
definition: "{{ lookup('ansible.builtin.template', 'ingress.yml.j2') | from_yaml }}"
state: present
namespace: "{{ argocd_namespace }}"
register: apply_manifests
until: apply_manifests is not failed
retries: 5
delay: 10
- name: Apply ArgoCD CM
kubernetes.core.k8s:
src: "files/argocd-cmd-params-cm.yaml"
state: present
namespace: "{{ argocd_namespace }}"
register: apply_manifests
until: apply_manifests is not failed
retries: 5
delay: 10
- name: Apply ArgoCD repository
kubernetes.core.k8s:
definition: "{{ lookup('ansible.builtin.template', 'repository.yml.j2') | from_yaml }}"
state: present
namespace: "{{ argocd_namespace }}"
register: apply_manifests
until: apply_manifests is not failed
retries: 5
delay: 10
- name: Apply ArgoCD Root Application
kubernetes.core.k8s:
definition: "{{ lookup('ansible.builtin.template', 'root_application.yml.j2') | from_yaml }}"
state: present
namespace: "{{ argocd_namespace }}"
register: apply_manifests
until: apply_manifests is not failed
retries: 5
delay: 10
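As noted in the README, `templates/ingress.yml.j2` is referenced by the "Apply ArgoCD Ingress" task but absent from this diff. A minimal sketch of what it might contain (host name and networking details are assumptions; `server.insecure` in the ConfigMap above implies TLS terminates at the ingress, so the backend port is plain HTTP):

```yaml
# Hypothetical templates/ingress.yml.j2 for the argocd-server service.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server
  namespace: {{ argocd_namespace }}
spec:
  rules:
    - host: argocd.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  number: 80
```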

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: argocd-repository-https
namespace: argocd
labels:
argocd.argoproj.io/secret-type: repository
stringData:
url: {{ argocd_git_repository }}
username: {{ argocd_git_username }}
password: {{ argocd_git_pat }}

View File

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: homelab-gitops-root
namespace: argocd
labels:
app.kubernetes.io/name: argocd-root
app.kubernetes.io/instance: homelab
app.kubernetes.io/component: gitops-bootstrap
spec:
project: default
source:
repoURL: {{ argocd_git_repository }}
targetRevision: HEAD
path: cluster-apps/
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- ApplyOutOfSyncOnly=true
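This root Application implements the app-of-apps pattern: ArgoCD syncs `cluster-apps/`, and each manifest found there is expected to be a child `Application`. A sketch of one such child (names and paths are assumptions):

```yaml
# Hypothetical child Application living at cluster-apps/cert-manager.yaml.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cert-manager
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/my-org/my-repo.git
    targetRevision: HEAD
    path: cluster-apps/cert-manager
  destination:
    server: https://kubernetes.default.svc
    namespace: cert-manager
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
```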

Some files were not shown because too many files have changed in this diff.