Compare commits
elastic_se...d8fd094379 (26 commits)

| SHA1 |
|---|
| d8fd094379 |
| 76000f8123 |
| 4aa939426b |
| 9cce71f73b |
| 97a5d6c41d |
| f1b0cfad2c |
| dac0d88d60 |
| 609e000089 |
| 3d7f652ff3 |
| cb8ccd8f00 |
| 02168225b1 |
| 6ff1ccecd0 |
| de62327fde |
| b70c8408dc |
| a913e1cbc0 |
| e3c67a32e9 |
| 8f2998abc0 |
| 7fcee3912f |
| 591342f580 |
| f2ea03bc01 |
| 0e8e07ed3e |
| a2a58f6343 |
| 42196a32dc |
| 6934a9f5fc |
| 27621aac03 |
| 56f058c254 |
.ansible-lint (new file, 31 lines)
@@ -0,0 +1,31 @@
---
# .ansible-lint

# Specify exclude paths to prevent linting vendor roles, etc.
exclude_paths:
  - ./.git/
  - ./.venv/
  - ./galaxy_roles/

# A list of rules to skip.
skip_list:
  - experimental
  - fqcn-builtins
  - no-handler
  - var-naming

# Enforce certain rules that are not enabled by default.
enable_list:
  - no-free-form
  - var-spacing
  - no-log-password
  - no-relative-path
  - command-instead-of-module
  - fqcn[deep]
  - no-changed-when

# Offline mode disables any features that require internet access.
offline: true

# Set the desired verbosity level.
verbosity: 1
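With this config at the repository root, a typical local run would look like the following; a minimal sketch, assuming `ansible-lint` is installed in the active environment (the playbook path is one added later in this diff):

```sh
# Lint the whole repository, picking up .ansible-lint automatically
ansible-lint

# Lint a single playbook with more detail
ansible-lint -v playbooks/docker.yml
```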
.editorconfig (new file, 17 lines)
@@ -0,0 +1,17 @@
root = true

[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{yml,yaml}]
indent_size = 2

[*.py]
indent_size = 4

[*.md]
trim_trailing_whitespace = false
README.md (13 lines changed)
@@ -72,3 +72,16 @@ sudo vgextend k3s-vg /dev/sda3
# Use the newly available storage in the root volume
sudo lvresize -l +100%FREE -r /dev/k3s-vg/root
```

## Cloud Init VMs

```sh
# On Hypervisor Host
qm resize <vmid> scsi0 +32G
# On VM
sudo fdisk -l /dev/sda # To check
echo 1 | sudo tee /sys/class/block/sda/device/rescan
sudo fdisk -l /dev/sda # To check
# sudo apt-get install cloud-guest-utils
sudo growpart /dev/sda 1
```
@@ -1,9 +1,12 @@
[defaults]
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
interpreter_python=python3

# (pathspec) Colon separated paths in which Ansible will search for Roles.
roles_path=./roles

# (pathlist) Comma separated list of Ansible inventory sources
inventory=./inventory/production
inventory=./vars/

# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
# If executable, it will be run and the resulting stdout will be used as the password.
@@ -33,3 +36,6 @@ skip=dark gray
[tags]
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=

[inventory]
ignore_extensions={{(REJECT_EXTS + ('.orig', '.cfg', '.retry', '.bak'))}}
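The `[defaults]` section above ends up with two `inventory=` lines; which value is in effect is easiest to settle empirically. A quick check, assuming ansible-core is installed:

```sh
# Show every setting that differs from Ansible's built-in defaults,
# including the effective inventory source
ansible-config dump --only-changed

# Render the resolved inventory as a group tree
ansible-inventory --graph
```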
@@ -1,56 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
34623331393561623539666362643966336661326136363431666465356535343663376236663066
3235363061633666626133313363373336656438633566630a383230393161323862303863656464
61633861323966343263363466343130306635343539326464363637383139343033656130336464
3163373535613961340a643335626165306663363063656339653862393533633534366331336231
63393432383731633463323164333831313535373261336166326237306230326465616239306536
37663863663161393130373835373062393866633864373465333937633838303130386334356566
64303663303862623038646235303934376230393538353466393232363764366339616633343433
65343730663864393766313134653335396562646135306637613031333461613965666465376532
32643261626665396338313836633337383932616265613662383132303539623239623965333966
66333638643635313262616434396164313833303065303662303736303232346535613834643435
32316434343231363662393163353832393166643739396165313631363539663439316133616361
61623830613035396333303363383332653736666231343763353666356539633433373066613330
65656631343764323234333161636632616130353139626362343361386535313336666566636464
35323434656439346262336335383366626565333765343562633236636132636532333761663535
31383565313436633438633336306430343733663539666631386532313836623166356332626664
39653762353265643861633237326662383466373539633732323833376238383963393837636466
66656631666131623166393731643537393161303636353932653062363137376334356238643064
34303666656638396263336639636135393536623037666137653132633264316431656438386432
34333632616265343435306365373039653036353337633563393739653632656163316636363336
32346638393364353634386231616639386164326531353134366639653837653236333030666139
64656334336231636337656233383834343763393738643362626665333362353335656131653165
35376330336433383262653039643131313437643265343663626363373439643932643063646439
37663630363839643263373630646430386536346132383564396463376361343661346661333636
39643961643031626462363537633263393838363262626439313838313039373035373634633462
38363938343932626131343966616638323632303636383034383536616164393539343635666166
39383434313863356434383961383139623436636230323866396366326665623863336438623335
33346634303639643131333933363838666336306438646335343931366437326462376438663837
34353938343837663930356464373332356530643231653166616331376335643832316365303164
32393062313638393936393863613731363233376537323834623164613231393133353635623866
35626337336562653265613730363961633662653331663966333430343462666535306133663835
64663539303765366331613666653632313233626231313264346332323266653230323332373836
33303564633464333064613431383230383535633362373839323334353162623433646230393838
33306162613739393338373361616634396636313765326465393332396537613263383339626666
63613162616363363138323965373966353366323463313934356530663931653565656164346363
37633862366436623030303233396639393434336438623433383530393836626164353064366432
35303532393437316162346366346636633135383938323631316563323935383561326335323438
30613266643232656138663431666162663330643133643263343237663565323231316239633037
39323732386236396136633539383335646634306139643533666636633131623566333137376236
39616134306463613864353135313636343365643437323465643862303137663937376233306261
31383862356535646563383438396363323838613237623034656561396163376433663262366137
63323562346633303162666530616534386539383238366139376263326265343138373139393432
35643335363139373139666230626363386232316536306431653964376333366235303763336135
65623231336638643034373932376263636336653561646664366138643031316438316465353363
38386539363631393433313664323135646562313537376236653635303263633230383866653039
66636534336234363438363139366531653237323137613961383831376665626365393462363834
36333965366463636233643433616431376436323535396238363933326363333661326462353161
66626435373938633832393662313161663336613862343332643766333633653866316464653735
31356135363662633961386264613836323435323836386635336338353663333137336666323531
36663731336664633763633634613136663866363530613264356431326539316530326161313362
62616539356537353261343464356334636134396664353463623163313765633432653932346136
32326239373333643461333733646264353238356134613037663836643131316664653539643839
30613235623933356565336630323939633266613164306262386666363137666661666131613962
61623930663536646462343264336535353634373833316537613839396566376466653736333830
33376663613063326230346439626237373232656665633832373364653931663361666432303166
663564323132383864336332363139393534
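The deleted file above is an Ansible Vault blob; such files are only ever touched through the `ansible-vault` CLI. A sketch of the usual operations, using `secrets.yml` (the name the playbooks in this diff load) as a stand-in path:

```sh
# Print the decrypted content without writing it to disk
ansible-vault view secrets.yml

# Open in $EDITOR and re-encrypt on save
ansible-vault edit secrets.yml

# Rotate the vault password
ansible-vault rekey secrets.yml
```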
@@ -1,36 +0,0 @@
#
# Essential
#

root: root
user: tudattr
timezone: Europe/Berlin
puid: "1000"
pgid: "1000"
pk_path: "/media/veracrypt1/genesis"
pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqc9fnzfCz8fQDFzla+D8PBhvaMmFu2aF+TYkkZRxl9 tuan@genesis-2022-01-20"

public_domain: tudattr.dev
internal_domain: seyshiro.de

#
# Packages
#

common_packages:
  - build-essential
  - curl
  - git
  - iperf3
  - neovim
  - rsync
  - smartmontools
  - sudo
  - systemd-timesyncd
  - tree
  - screen
  - bat
  - fd-find
  - ripgrep

arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
@@ -1,28 +0,0 @@
db:
  default_user:
    user: "postgres"
  name: "k3s"
  user: "k3s"
  password: "{{ vault.k3s.postgres.db.password }}"
  listen_address: "{{ k3s.db.ip }}"

k3s:
  net: "192.168.20.0/24"
  server:
    ips:
      - 192.168.20.21
      - 192.168.20.24
      - 192.168.20.30
  loadbalancer:
    ip: 192.168.20.22
    default_port: 6443
  db:
    ip: 192.168.20.23
    default_port: "5432"
  agent:
    ips:
      - 192.168.20.25
      - 192.168.20.26
      - 192.168.20.27

k3s_db_connection_string: "postgres://{{ db.user }}:{{ db.password }}@{{ k3s.db.ip }}:{{ k3s.db.default_port }}/{{ db.name }}"
@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.12
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.aya01.root.sudo }}"

host:
  hostname: "aya01"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.34
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host00.sudo }}"

host:
  hostname: "docker-host00"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.35
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host01.sudo }}"

host:
  hostname: "docker-host01"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.36
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.host02.sudo }}"

host:
  hostname: "docker-host02"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.37
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.docker.lb.sudo }}"

host:
  hostname: "docker-lb"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.14
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.inko.root.sudo }}"

host:
  hostname: "inko"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.25
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent00.sudo }}"

host:
  hostname: "k3s-agent00"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.26
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent01.sudo }}"

host:
  hostname: "k3s-agent01"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.27
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.agent02.sudo }}"

host:
  hostname: "k3s-agent02"
  ip: "{{ ansible_host }}"
@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.22
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.loadbalancer.sudo }}"
host:
  hostname: "k3s-loadbalancer"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.32
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn00.sudo }}"

host:
  hostname: "k3s-longhorn00"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.33
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn01.sudo }}"

host:
  hostname: "k3s-longhorn01"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.31
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.longhorn02.sudo }}"

host:
  hostname: "k3s-longhorn02"
  ip: "{{ ansible_host }}"
@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.23
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.postgres.sudo }}"
host:
  hostname: "k3s-postgres"
  ip: "{{ ansible_host }}"
@@ -1,9 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.21
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server00.sudo }}"
host:
  hostname: "k3s-server00"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.24
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server01.sudo }}"

host:
  hostname: "k3s-server01"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "{{ user }}"
ansible_host: 192.168.20.30
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.k3s.server02.sudo }}"

host:
  hostname: "k3s-server02"
  ip: "{{ ansible_host }}"
@@ -1,10 +0,0 @@
---
ansible_user: "root"
ansible_host: 192.168.20.28
ansible_port: 22
ansible_ssh_private_key_file: "{{ pk_path }}"
ansible_become_pass: "{{ vault.pve.lulu.root.sudo }}"

host:
  hostname: "lulu"
  ip: "{{ ansible_host }}"
@@ -1,55 +0,0 @@
[proxmox]
aya01
lulu
inko

[k3s]
k3s-postgres
k3s-loadbalancer
k3s-server[00:02]
k3s-agent[00:02]
k3s-longhorn[00:02]

[vm]
k3s-postgres
k3s-loadbalancer
k3s-agent[00:02]
k3s-server[00:02]
k3s-longhorn[00:02]
docker-host[00:02]

[k3s_nodes]
k3s-server[00:02]
k3s-agent[00:02]
k3s-longhorn[00:02]

[docker]
docker-host[00:02]
docker-lb

[vps]
mii

[k3s_server]
k3s-server[00:02]

[k3s_agent]
k3s-agent[00:02]

[k3s_storage]
k3s-longhorn[00:02]

[db]
k3s-postgres

[loadbalancer]
k3s-loadbalancer

[docker_host]
docker-host[00:02]

[docker_lb]
docker-lb

[vm:vars]
ansible_ssh_common_args='-o ProxyCommand="ssh -p 22 -W %h:%p -q aya01"'
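The `[vm:vars]` line at the end of the deleted inventory tunnels every VM connection through `aya01` as a jump host. The equivalent manual command for a single host, useful when debugging connectivity, would be roughly:

```sh
# Reach a VM the same way the inventory's ProxyCommand does
ssh -o ProxyCommand="ssh -p 22 -W %h:%p -q aya01" k3s-server00
```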
@@ -1,10 +0,0 @@
---
- name: Run the common role on k3s
  hosts: k3s
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
@@ -1,19 +0,0 @@
---
- name: Set up Servers
  hosts: db
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: postgres
      tags:
        - postgres
    - role: node_exporter
      tags:
        - node_exporter
    - role: postgres_exporter
      tags:
        - postgres_exporter
@@ -1,9 +1,7 @@
---
- name: Set up Servers
  hosts: docker_host
  gather_facts: yes
  vars_files:
    - secrets.yml
  gather_facts: true
  roles:
    - role: common
      tags:
@@ -1,13 +1,13 @@
---
- name: Set up reverse proxy for docker
  hosts: docker_lb
  gather_facts: yes
  vars_files:
    - secrets.yml
  hosts: docker
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["docker_lb"]
    - role: reverse_proxy
      tags:
        - reverse_proxy
      when: inventory_hostname in groups["docker_lb"]
playbooks/docker.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- name: Setup Docker Hosts
  ansible.builtin.import_playbook: docker-host.yml
- name: Setup Docker load balancer
  ansible.builtin.import_playbook: docker-lb.yml
@@ -1,20 +1,6 @@
- name: Set up Agents
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true

    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"

  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_agent"]
@@ -22,10 +8,9 @@
        - common
    - role: k3s_agent
      when: inventory_hostname in groups["k3s_agent"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_agent
    - role: node_exporter
      when: inventory_hostname in groups["k3s_agent"]
      tags:
        - node_exporter
    # - role: node_exporter
    #   when: inventory_hostname in groups["k3s_agent"]
    #   tags:
    #     - node_exporter
playbooks/k3s-loadbalancer.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
- name: Set up Servers
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["k3s_loadbalancer"]
    - role: k3s_loadbalancer
      tags:
        - k3s_loadbalancer
      when: inventory_hostname in groups["k3s_loadbalancer"]
    - role: node_exporter
      tags:
        - node_exporter
      when: inventory_hostname in groups["k3s_loadbalancer"]
@@ -1,16 +1,17 @@
---
- name: Set up Servers
  hosts: k3s_server
  gather_facts: yes
  vars_files:
    - secrets.yml
  hosts: k3s
  gather_facts: true
  roles:
    - role: common
      tags:
        - common
      when: inventory_hostname in groups["k3s_server"]
    - role: k3s_server
      tags:
        - k3s_server
    - role: node_exporter
      tags:
        - node_exporter
      when: inventory_hostname in groups["k3s_server"]
    # - role: node_exporter
    #   tags:
    #     - node_exporter
    #   when: inventory_hostname in groups["k3s_server"]
@@ -1,20 +1,6 @@
- name: Set up storage
  hosts: k3s_nodes
  gather_facts: yes
  vars_files:
    - secrets.yml
  pre_tasks:
    - name: Get K3s token from the first server
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      slurp:
        src: /var/lib/rancher/k3s/server/node-token
      register: k3s_token
      become: true

    - name: Set fact on k3s.server.ips[0]
      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"

  gather_facts: true
  roles:
    - role: common
      when: inventory_hostname in groups["k3s_storage"]
@@ -22,7 +8,6 @@
        - common
    - role: k3s_storage
      when: inventory_hostname in groups["k3s_storage"]
      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
      tags:
        - k3s_storage
    - role: node_exporter
playbooks/kubernetes_setup.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Setup Kubernetes Cluster
  hosts: kubernetes
  any_errors_fatal: true
  gather_facts: false
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
  roles:
    - role: kubernetes_traefik
      when: is_localhost
    - role: kubernetes_argocd
      when: is_localhost
    - role: kubernetes_metallb
      when: is_localhost
    - role: kubernetes_cert_manager
      when: is_localhost
@@ -1,16 +0,0 @@
---
- name: Set up Servers
  hosts: loadbalancer
  gather_facts: yes
  vars_files:
    - secrets.yml
  roles:
    - role: common
      tags:
        - common
    - role: loadbalancer
      tags:
        - loadbalancer
    - role: node_exporter
      tags:
        - node_exporter
playbooks/proxmox.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
---
- name: Run proxmox vm playbook
  hosts: proxmox
  gather_facts: true
  vars:
    is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
    is_proxmox_node: "{{ 'proxmox_nodes' in group_names }}"
  roles:
    - role: common
      tags:
        - common
      when: not is_localhost
    - role: proxmox
      tags:
        - proxmox
@@ -1,9 +0,0 @@
---
- hosts: db
  gather_facts: yes
  vars_files:
    - secrets.yml
  tasks:
    - name: Print the database connection string
      debug:
        msg: "{{ k3s_db_connection_string }}"
playbooks/traefik.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
- name: "Enable traefik dashboard"
  hosts: k3s_servers[0]
  become: true
  roles:
    - role: kubernetes_traefik
      vars:
        traefik_dashboard_hostname: "{{ traefik_dashboard_hostname }}"
requirements.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
durationpy==0.10
google-auth==2.40.3
idna==3.10
kubernetes==33.1.0
nc-dnsapi==0.1.3
oauthlib==3.3.1
proxmoxer==2.2.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9.1
six==1.17.0
urllib3==2.3.0
websocket-client==1.8.0
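A setup sketch for these pinned dependencies, assuming Python 3 with the venv module available:

```sh
python3 -m venv .venv
. .venv/bin/activate
pip install -r requirements.txt
```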
roles/common/files/ghostty/infocmp (new file, 80 lines)
@@ -0,0 +1,80 @@
xterm-ghostty|ghostty|Ghostty,
  am, bce, ccc, hs, km, mc5i, mir, msgr, npc, xenl, AX, Su, Tc, XT, fullkbd,
  colors#0x100, cols#80, it#8, lines#24, pairs#0x7fff,
  acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
  bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, civis=\E[?25l,
  clear=\E[H\E[2J, cnorm=\E[?12l\E[?25h, cr=\r,
  csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
  cud=\E[%p1%dB, cud1=\n, cuf=\E[%p1%dC, cuf1=\E[C,
  cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
  cvvis=\E[?12;25h, dch=\E[%p1%dP, dch1=\E[P, dim=\E[2m,
  dl=\E[%p1%dM, dl1=\E[M, dsl=\E]2;\007, ech=\E[%p1%dX,
  ed=\E[J, el=\E[K, el1=\E[1K, flash=\E[?5h$<100/>\E[?5l,
  fsl=^G, home=\E[H, hpa=\E[%i%p1%dG, ht=^I, hts=\EH,
  ich=\E[%p1%d@, ich1=\E[@, il=\E[%p1%dL, il1=\E[L, ind=\n,
  indn=\E[%p1%dS,
  initc=\E]4;%p1%d;rgb:%p2%{255}%*%{1000}%/%2.2X/%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\E\\,
  invis=\E[8m, kDC=\E[3;2~, kEND=\E[1;2F, kHOM=\E[1;2H,
  kIC=\E[2;2~, kLFT=\E[1;2D, kNXT=\E[6;2~, kPRV=\E[5;2~,
  kRIT=\E[1;2C, kbs=^?, kcbt=\E[Z, kcub1=\EOD, kcud1=\EOB,
  kcuf1=\EOC, kcuu1=\EOA, kdch1=\E[3~, kend=\EOF, kent=\EOM,
  kf1=\EOP, kf10=\E[21~, kf11=\E[23~, kf12=\E[24~,
  kf13=\E[1;2P, kf14=\E[1;2Q, kf15=\E[1;2R, kf16=\E[1;2S,
  kf17=\E[15;2~, kf18=\E[17;2~, kf19=\E[18;2~, kf2=\EOQ,
  kf20=\E[19;2~, kf21=\E[20;2~, kf22=\E[21;2~,
  kf23=\E[23;2~, kf24=\E[24;2~, kf25=\E[1;5P, kf26=\E[1;5Q,
  kf27=\E[1;5R, kf28=\E[1;5S, kf29=\E[15;5~, kf3=\EOR,
  kf30=\E[17;5~, kf31=\E[18;5~, kf32=\E[19;5~,
  kf33=\E[20;5~, kf34=\E[21;5~, kf35=\E[23;5~,
  kf36=\E[24;5~, kf37=\E[1;6P, kf38=\E[1;6Q, kf39=\E[1;6R,
  kf4=\EOS, kf40=\E[1;6S, kf41=\E[15;6~, kf42=\E[17;6~,
  kf43=\E[18;6~, kf44=\E[19;6~, kf45=\E[20;6~,
  kf46=\E[21;6~, kf47=\E[23;6~, kf48=\E[24;6~,
  kf49=\E[1;3P, kf5=\E[15~, kf50=\E[1;3Q, kf51=\E[1;3R,
  kf52=\E[1;3S, kf53=\E[15;3~, kf54=\E[17;3~,
  kf55=\E[18;3~, kf56=\E[19;3~, kf57=\E[20;3~,
  kf58=\E[21;3~, kf59=\E[23;3~, kf6=\E[17~, kf60=\E[24;3~,
  kf61=\E[1;4P, kf62=\E[1;4Q, kf63=\E[1;4R, kf7=\E[18~,
  kf8=\E[19~, kf9=\E[20~, khome=\EOH, kich1=\E[2~,
  kind=\E[1;2B, kmous=\E[<, knp=\E[6~, kpp=\E[5~,
  kri=\E[1;2A, oc=\E]104\007, op=\E[39;49m, rc=\E8,
  rep=%p1%c\E[%p2%{1}%-%db, rev=\E[7m, ri=\EM,
  rin=\E[%p1%dT, ritm=\E[23m, rmacs=\E(B, rmam=\E[?7l,
  rmcup=\E[?1049l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
  rmul=\E[24m, rs1=\E]\E\\\Ec, sc=\E7,
  setab=\E[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m,
  setaf=\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m,
  sgr=%?%p9%t\E(0%e\E(B%;\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m,
  sgr0=\E(B\E[m, sitm=\E[3m, smacs=\E(0, smam=\E[?7h,
  smcup=\E[?1049h, smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m,
  smul=\E[4m, tbc=\E[3g, tsl=\E]2;, u6=\E[%i%d;%dR, u7=\E[6n,
  u8=\E[?%[;0123456789]c, u9=\E[c, vpa=\E[%i%p1%dd,
  BD=\E[?2004l, BE=\E[?2004h, Clmg=\E[s,
  Cmg=\E[%i%p1%d;%p2%ds, Dsmg=\E[?69l, E3=\E[3J,
  Enmg=\E[?69h, Ms=\E]52;%p1%s;%p2%s\007, PE=\E[201~,
  PS=\E[200~, RV=\E[>c, Se=\E[2 q,
  Setulc=\E[58:2::%p1%{65536}%/%d:%p1%{256}%/%{255}%&%d:%p1%{255}%&%d%;m,
  Smulx=\E[4:%p1%dm, Ss=\E[%p1%d q,
  Sync=\E[?2026%?%p1%{1}%-%tl%eh%;,
  XM=\E[?1006;1000%?%p1%{1}%=%th%el%;, XR=\E[>0q,
  fd=\E[?1004l, fe=\E[?1004h, kDC3=\E[3;3~, kDC4=\E[3;4~,
  kDC5=\E[3;5~, kDC6=\E[3;6~, kDC7=\E[3;7~, kDN=\E[1;2B,
  kDN3=\E[1;3B, kDN4=\E[1;4B, kDN5=\E[1;5B, kDN6=\E[1;6B,
  kDN7=\E[1;7B, kEND3=\E[1;3F, kEND4=\E[1;4F,
  kEND5=\E[1;5F, kEND6=\E[1;6F, kEND7=\E[1;7F,
  kHOM3=\E[1;3H, kHOM4=\E[1;4H, kHOM5=\E[1;5H,
  kHOM6=\E[1;6H, kHOM7=\E[1;7H, kIC3=\E[2;3~, kIC4=\E[2;4~,
  kIC5=\E[2;5~, kIC6=\E[2;6~, kIC7=\E[2;7~, kLFT3=\E[1;3D,
  kLFT4=\E[1;4D, kLFT5=\E[1;5D, kLFT6=\E[1;6D,
  kLFT7=\E[1;7D, kNXT3=\E[6;3~, kNXT4=\E[6;4~,
  kNXT5=\E[6;5~, kNXT6=\E[6;6~, kNXT7=\E[6;7~,
  kPRV3=\E[5;3~, kPRV4=\E[5;4~, kPRV5=\E[5;5~,
  kPRV6=\E[5;6~, kPRV7=\E[5;7~, kRIT3=\E[1;3C,
  kRIT4=\E[1;4C, kRIT5=\E[1;5C, kRIT6=\E[1;6C,
  kRIT7=\E[1;7C, kUP=\E[1;2A, kUP3=\E[1;3A, kUP4=\E[1;4A,
  kUP5=\E[1;5A, kUP6=\E[1;6A, kUP7=\E[1;7A, kxIN=\E[I,
  kxOUT=\E[O, rmxx=\E[29m, rv=\E\\[[0-9]+;[0-9]+;[0-9]+c,
  setrgbb=\E[48:2:%p1%d:%p2%d:%p3%dm,
  setrgbf=\E[38:2:%p1%d:%p2%d:%p3%dm, smxx=\E[9m,
  xm=\E[<%i%p3%d;%p1%d;%p2%d;%?%p4%tM%em%;,
  xr=\EP>\\|[ -~]+a\E\\,
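A task later in this diff compiles this file on the target with `tic -x`. The same check can be done by hand; a sketch, run from the directory holding the file:

```sh
# Compile the terminfo source into the user database (~/.terminfo)
tic -x infocmp

# Confirm the entry resolves
infocmp xterm-ghostty | head -n 3
```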
roles/common/files/ssh/root/sshd_config (new file, 19 lines)
@@ -0,0 +1,19 @@
Protocol 2
PermitRootLogin yes
MaxAuthTries 3
PubkeyAuthentication yes
PasswordAuthentication no
PermitEmptyPasswords no
ChallengeResponseAuthentication no
UsePAM yes
AllowAgentForwarding no
AllowTcpForwarding yes
X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
@@ -1,4 +1,3 @@
Include /etc/ssh/sshd_config.d/*.conf
Protocol 2
PermitRootLogin no
MaxAuthTries 3
@@ -13,6 +12,7 @@ X11Forwarding no
PrintMotd no
TCPKeepAlive no
ClientAliveCountMax 2
TrustedUserCAKeys /etc/ssh/vault-ca.pub
UseDNS yes
AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server
roles/common/files/ssh/vault-ca.pub (new file, 1 line)
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxIbkko72kVSfYDjJpiMH9SjHUGqBn3MbBvmotsPQhybFgnnkBpX/3fM9olP+Z6PGsmbOEs0fOjPS6uY5hjKcKsyHdZfS6cA4wjY/DL8fwATAW5FCDBtMpdg2/sb8j9jutHHs4sQeRBolVwKcv+ZAaJNnOzNHwxVUfT9bNwShthnAFjkY7oZo657FRomlkDJjmGQuratP0veKA8jYzqqPWwWidTGQerLYTyJ3Z8pbQa5eN7svrvabjjDLbVTDESE8st9WEmwvAwoj7Kz+WovCy0Uz7LRFVmaRiapM8SXtPPUC0xfyzAB3NxwBtxizdUMlShvLcL6cujcUBMulVMpsqEaOESTpmVTrMJhnJPZG/3j9ziGoYIa6hMj1J9/qLQ5dDNVVXMxw99G31x0LJoy12IE90P4Cahux8iN0Cp4oB4+B6/qledxs1fcRzsnQY/ickjKhqcJwgHzsnwjDkeYRaYte5x4f/gJ77kA20nPto7mxr2mhWot/i9B1KlMURVXOH/q4nrzhJ0hPJpM0UtzQ58TmzE4Osf/B5yoe8V//6XnelbmG/nKCIzg12d7PvaLjbFMn8IgOwDMRlip+vpyadRr/+pCawrfo4vLF7BsnJ84aoByIpbwaysgaYHtjfZWImorMVkgviC4O6Hn9/ZiLNze2A9DaNUnLVJ0nYNbmv9Q==
@@ -2,11 +2,23 @@
- name: Copy bash-configs
  ansible.builtin.template:
    src: "files/bash/{{ item }}"
    dest: "/home/{{ user }}/.{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    dest: "{{ ansible_env.HOME }}/.{{ item }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
  loop:
    - bashrc
    - bash_aliases
  become: true

- name: Copy ghostty infocmp
  ansible.builtin.copy:
    src: files/ghostty/infocmp
    dest: "{{ ansible_env.HOME }}/ghostty"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "0644"
  register: ghostty_terminfo

- name: Compile ghostty terminfo
  ansible.builtin.command: "tic -x {{ ansible_env.HOME }}/ghostty"
  when: ghostty_terminfo.changed
@@ -11,7 +11,6 @@
    url: https://raw.githubusercontent.com/eza-community/eza/main/deb.asc
    dest: /etc/apt/keyrings/gierens.asc
    mode: "0644"
  register: gpg_key_result
  become: true

- name: Add Gierens repository to apt sources
@@ -1,14 +1,14 @@
---
- name: Set a hostname
  ansible.builtin.hostname:
    name: "{{ host.hostname }}"
    name: "{{ inventory_hostname }}"
  become: true

- name: Update /etc/hosts to reflect the new hostname
  ansible.builtin.lineinfile:
    path: /etc/hosts
    regexp: '^127\.0\.1\.1'
    line: "127.0.1.1 {{ host.hostname }}"
    line: "127.0.1.1 {{ inventory_hostname }}"
    state: present
    backup: true
  become: true
@@ -1,10 +1,10 @@
---
- name: Configure Time
  ansible.builtin.include_tasks: time.yml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yml
- name: Configure Packages
  ansible.builtin.include_tasks: packages.yml
- name: Configure Hostname
  ansible.builtin.include_tasks: hostname.yml
- name: Configure Extra-Packages
  ansible.builtin.include_tasks: extra_packages.yml
- name: Configure Bash
@@ -5,9 +5,24 @@
    upgrade: true
    autoremove: true
  become: true
  when: ansible_user_id != "root"

- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  become: true
  when: ansible_user_id != "root"

- name: Update and upgrade packages
  ansible.builtin.apt:
    update_cache: true
    upgrade: true
    autoremove: true
  when: ansible_user_id == "root"

- name: Install base packages
  ansible.builtin.apt:
    name: "{{ common_packages }}"
    state: present
  when: ansible_user_id == "root"
@@ -1,17 +1,28 @@
---
- name: Copy sshd_config
- name: Copy user sshd_config
  ansible.builtin.template:
    src: templates/ssh/sshd_config
    src: files/ssh/user/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  become: true
  when: ansible_user_id != "root"

- name: Copy root sshd_config
  ansible.builtin.template:
    src: files/ssh/root/sshd_config
    dest: /etc/ssh/sshd_config
    mode: "644"
    backup: true
  notify:
    - Restart sshd
  when: ansible_user_id == "root"

- name: Copy pubkey
  ansible.builtin.copy:
    content: "{{ pubkey }}"
    dest: "/home/{{ user }}/.ssh/authorized_keys"
    owner: "{{ user }}"
    group: "{{ user }}"
    src: files/ssh/vault-ca.pub
    dest: "/etc/ssh/vault-ca.pub"
    mode: "644"
  become: true
@@ -1,4 +1,11 @@
---
- name: Set timezone to "{{ timezone }}"
- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  become: true
  when: ansible_user_id != "root"

- name: Set timezone
  community.general.timezone:
    name: "{{ timezone }}"
  when: ansible_user_id == "root"
roles/common/vars/main.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
common_packages:
  - build-essential
  - curl
  - git
  - iperf3
  - neovim
  - rsync
  - smartmontools
  - sudo
  - systemd-timesyncd
  - tree
  - screen
  - bat
  - fd-find
  - ripgrep
@@ -8,4 +8,14 @@
- name: Restart compose
  community.docker.docker_compose_v2:
    project_src: "{{ docker.directories.compose }}"
    state: restarted
    state: present
  retries: 3
  delay: 5
  become: true

- name: Restart host
  ansible.builtin.reboot:
    connect_timeout: 5
    reboot_timeout: 600
    test_command: whoami
  become: true
roles/docker_host/tasks/10_setup.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
- name: Check if debian.sources file exists
  ansible.builtin.stat:
    path: /etc/apt/sources.list.d/debian.sources
  register: debian_sources_stat

- name: Replace Components line to include non-free and non-free-firmware
  ansible.builtin.replace:
    path: /etc/apt/sources.list.d/debian.sources
    regexp: "^Components:.*$"
    replace: "Components: main non-free non-free-firmware"
  when: debian_sources_stat.stat.exists
  become: true

- name: Setup VM Packages
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
    update_cache: true
  loop: "{{ docker_host_package_common_dependencies }}"
  become: true

- name: Gather installed package facts
  ansible.builtin.package_facts:
    manager: auto

- name: Filter for specific cloud kernel packages
  ansible.builtin.set_fact:
    cloud_kernel_packages: >-
      {{
        ansible_facts.packages.keys()
        | select('search', 'linux-image')
        | select('search', 'cloud')
        | list
      }}

- name: Use the list to remove the found packages
  ansible.builtin.apt:
    name: "{{ cloud_kernel_packages }}"
    state: absent
    autoremove: true
  when: cloud_kernel_packages | length > 0
  become: true

- name: Restart host
  ansible.builtin.reboot:
    connect_timeout: 5
    reboot_timeout: 600
    test_command: whoami
  become: true
@@ -5,10 +5,12 @@
    state: present
  become: true

- name: Append the group docker to "{{ user }}"
- name: Append the group docker to "{{ ansible_user_id }}"
  ansible.builtin.user:
    name: "{{ user }}"
    name: "{{ ansible_user_id }}"
    shell: /bin/bash
    groups: docker
    append: true
  become: true
  notify:
    - Restart host
@@ -9,19 +9,20 @@
    - /media/series
    - /media/movies
    - /media/songs
    - "{{ docker.directories.opt }}"
    - "{{ docker.directories.local }}"
    - "{{ docker.directories.config }}"
    - "{{ docker.directories.compose }}"
    - /opt/local
  become: true

- name: Set ownership to {{ user }}
- name: Set ownership to {{ ansible_user_id }}
  ansible.builtin.file:
    path: "{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
  loop:
    - "{{ docker.directories.opt }}"
    - /opt/local
    - "{{ docker.directories.local }}"
    - "{{ docker.directories.config }}"
    - "{{ docker.directories.compose }}"
    - /media
  become: true
roles/docker_host/tasks/50_provision.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
- name: Set fact if this host should run Keycloak
  ansible.builtin.set_fact:
    is_keycloak_host: "{{ inventory_hostname in (services | selectattr('name', 'equalto', 'keycloak') | map(attribute='vm') | first) }}"

- name: Create Keycloak directories
  ansible.builtin.file:
    path: "{{ docker.directories.local }}/keycloak/"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    state: directory
    mode: "0755"
  when: is_keycloak_host | bool
  become: true

- name: Setup Keycloak realms
  ansible.builtin.template:
    src: "templates/keycloak/realm.json.j2"
    dest: "{{ docker.directories.local }}/keycloak/{{ keycloak.realm }}-realm.json"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
    backup: true
  when: is_keycloak_host | bool
  loop: "{{ keycloak_config.realms }}"
  loop_control:
    loop_var: keycloak
  notify:
    - Restart docker
    - Restart compose
  become: true
@@ -3,8 +3,8 @@
  ansible.builtin.template:
    src: "templates/compose.yaml.j2"
    dest: "{{ docker.directories.compose }}/compose.yaml"
    owner: "{{ user }}"
    group: "{{ user }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: "644"
    backup: true
  notify:
@@ -1,18 +1,20 @@
---
- name: Setup VM
  ansible.builtin.include_tasks: setup.yml

  ansible.builtin.include_tasks: 10_setup.yml
- name: Install docker
  ansible.builtin.include_tasks: installation.yml
  ansible.builtin.include_tasks: 20_installation.yml

- name: Setup user and group for docker
  ansible.builtin.include_tasks: user_group_setup.yml
  ansible.builtin.include_tasks: 30_user_group_setup.yml

- name: Setup directory structure for docker
  ansible.builtin.include_tasks: directory_setup.yml
  ansible.builtin.include_tasks: 40_directory_setup.yml

- name: Deploy configs
  ansible.builtin.include_tasks: 50_provision.yml

- name: Deploy docker compose
  ansible.builtin.include_tasks: deploy_compose.yml
  ansible.builtin.include_tasks: 60_deploy_compose.yml

- name: Publish metrics
  ansible.builtin.include_tasks: export.yml
  ansible.builtin.include_tasks: 70_export.yml
@@ -1,9 +0,0 @@
---
- name: Enable HW accelerate for VM
  ansible.builtin.apt:
    name: "{{ item }}"
    state: present
  loop:
    - firmware-misc-nonfree
    - nfs-common
  become: true
@@ -1,12 +1,13 @@
services:
{% for service in services %}
{% if inventory_hostname in service.vm %}
  {{service.name}}:

  {{ service.name }}:
    container_name: {{ service.container_name }}
    image: {{ service.image }}
    restart: {{ service.restart }}
    restart: unless-stopped
{% if service.network_mode is not defined %}
    hostname: {{service.name}}
    hostname: {{ service.name }}
    networks:
      - net
{% endif %}
@@ -15,11 +16,40 @@ services:
    ports:
{% for port in service.ports %}
{% if port.internal != 'proxy_only' %}
      - {{port.external}}:{{port.internal}}
      - {{ port.external }}:{{ port.internal }}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% if service.ports is defined and service.ports is iterable %}
{% set first_http_port = service.ports | default([]) | selectattr('name', 'defined') | selectattr('name', 'search', 'http') | first %}
{% set chosen_http_port_value = none %}
{% if first_http_port is not none %}
{% if first_http_port.internal is defined and first_http_port.internal == 'proxy_only' %}
{% if first_http_port.external is defined %}
{% set chosen_http_port_value = first_http_port.external %}
{% endif %}
{% else %}
{% set chosen_http_port_value = first_http_port.internal %}
{% endif %}
{% if chosen_http_port_value is defined %}
    healthcheck:
{% set healthcheck = 'curl' %}
{% if service.healthcheck is defined %}
{% set healthcheck = service.healthcheck %}
{% endif %}
{% if healthcheck == 'curl' %}
      test: ["CMD", "curl", "-f", "--silent", "--show-error", "--connect-timeout", "5", "http://localhost:{{ chosen_http_port_value }}/"]
{% elif healthcheck == 'wget' %}
      test: ["CMD-SHELL", "wget --quiet --spider --timeout=5 http://localhost:{{ chosen_http_port_value }}/ || exit 1"]
{% endif %}
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 20s
{% endif %}
{% endif %}
{% endif %}
{% if service.cap_add is defined and service.cap_add is iterable %}
    cap_add:
{% for cap in service.cap_add %}
@@ -41,46 +71,88 @@ services:
{% if service.volumes is defined and service.volumes is iterable %}
    volumes:
{% for volume in service.volumes %}
      - {{volume.external}}:{{volume.internal}}
      - {{ volume.external }}:{{ volume.internal }}
{% endfor %}
{% endif %}
{% if service.environment is defined and service.environment is iterable %}
    environment:
{% for env in service.environment %}
      - {{env}}
      - {{ env }}
{% endfor %}
{% endif %}
{% if service.devices is defined and service.devices is iterable %}
    devices:
{% for device in service.devices %}
      - {{device.external}}:{{device.internal}}
      - {{ device.external }}:{{ device.internal }}
{% endfor %}
{% endif %}
{% if service.name == 'paperless' %}

  {{service.name}}-broker:
    container_name: paperless-broker
    image: docker.io/library/redis:7
    restart: unless-stopped
    networks:
      - net
    volumes:
      - /opt/local/paperless/redis/data:/data

  {{service.name}}-postgres:
    container_name: paperless-postgres
    image: docker.io/library/postgres:15
    restart: unless-stopped
    networks:
      - net
    volumes:
      - /opt/local/paperless/db/data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: 5fnhn%u2YWY3paNvMAjdoufYPQ2Hf3Yi
{% if service.command is defined and service.command is iterable %}
    command:
{% for command in service.command %}
      - {{ command }}
{% endfor %}
{% endif %}
{% if service.sub_service is defined and service.sub_service is iterable %}
{% for sub in service.sub_service %}
{% if sub.name is defined and sub.name == "postgres" %}
  {{ service.name }}-postgres:
    container_name: {{ service.name }}-postgres
    image: docker.io/library/postgres:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-postgres
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/postgres/data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: {{ service.name }}
      POSTGRES_USER: {{ sub.username }}
      POSTGRES_PASSWORD: {{ sub.password }}
{% endif %}
{% if sub.name is defined and sub.name == "redis" %}
  {{ service.name }}-redis:
    container_name: {{ service.name }}-redis
    image: docker.io/library/redis:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-redis
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/redis/data:/data
{% endif %}
{% if sub.name is defined and sub.name == "chrome" %}
  {{ service.name }}-chrome:
    image: gcr.io/zenika-hub/alpine-chrome:{{ sub.version }}
    container_name: {{ service.name }}-chrome
    restart: unless-stopped
    networks:
      - net
    command:
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars
{% endif %}
{% if sub.name is defined and sub.name == "meilisearch" %}
  {{ service.name }}-meilisearch:
    container_name: {{ service.name }}-meilisearch
    image: getmeili/meilisearch:{{ sub.version }}
    restart: unless-stopped
    hostname: {{ service.name }}-meilisearch
    networks:
      - net
    volumes:
      - /opt/local/{{ service.name }}/mailisearch/data:/meili_data
    environment:
      - MEILI_NO_ANALYTICS=true
      - NEXTAUTH_SECRET={{ sub.nextauth_secret }}
      - MEILI_MASTER_KEY={{ sub.meili_master_key }}
      - OPENAI_API_KEY="{{ sub.openai_key }}"
{% endif %}
{% endfor %}
{% endif %}

{% endif %}
{% endfor %}
networks:
@@ -90,6 +162,3 @@ networks:
    driver: default
    config:
      - subnet: 172.16.69.0/24

volumes:
  prometheus_data: {}
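With healthchecks and optional sub-services now emitted by the template, rendering mistakes are cheapest to catch through compose itself; a sketch, assuming the rendered file sits in the compose project directory:

```sh
# Validate the rendered file without starting anything
docker compose -f compose.yaml config --quiet

# Check the health state produced by the generated healthcheck
docker inspect --format '{{ .State.Health.Status }}' <container_name>
```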
roles/docker_host/templates/keycloak/realm.json.j2 (new file, 79 lines)
@@ -0,0 +1,79 @@
{
  "realm": "{{ keycloak.realm }}",
  "enabled": true,
  "displayName": "{{ keycloak.display_name }}",
  "displayNameHtml": "<div class=\"kc-logo-text\">{{keycloak.display_name}}</div>",
  "bruteForceProtected": true,
  "users": [
{% if keycloak.users is defined and keycloak.users is iterable %}
{% for user in keycloak.users %}
    {
      "username": "{{ user.username }}",
      "enabled": true,
      "credentials": [
        {
          "type": "password",
          "value": "{{ user.password }}",
          "temporary": false
        }
      ],
      "realmRoles": [
{% for realm_role in user.realm_roles %}
        "{{ realm_role }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
      ],
      "clientRoles": {
        "account": [
{% for account in user.client_roles.account %}
          "{{ account }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
        ]
      }
    },{% if not loop.last %}{% endif %}
{% endfor %}
{% endif %}
    {
      "username": "{{ keycloak.admin.username }}",
      "enabled": true,
      "credentials": [
        {
          "type": "password",
          "value": "{{ keycloak.admin.password }}",
          "temporary": false
        }
      ],
      "realmRoles": [
{% for realm_role in keycloak.admin.realm_roles %}
        "{{ realm_role }}"{% if not loop.last %},{% endif %}{{''}}
{% endfor %}
      ],
      "clientRoles": {
        "realm-management": [
{% for realm_management in keycloak.admin.client_roles.realm_management %}
          "{{ realm_management }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
        ],
        "account": [
{% for account in keycloak.admin.client_roles.account %}
          "{{ account }}"{%- if not loop.last %},{% endif %}{{''}}
{% endfor %}
        ]
      }
    }
  ],
  "roles": {
    "realm": [
{% for role in keycloak.roles.realm %}
      {
        "name": "{{ role.name }}",
        "description": "{{ role.name }}"
      }{% if not loop.last %},{% endif %}
{% endfor %}
    ]
  },
  "defaultRoles": [
{% for role in keycloak.roles.default_roles %}
    "{{ role }}"{% if not loop.last %},{% endif %}{{''}}
{% endfor %}
  ]
}
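One way to load the rendered realm files; a sketch assuming a Quarkus-based Keycloak image (the mount path and flag below are Keycloak's stock import mechanism, not something this diff sets up):

```sh
# Keycloak imports any realm JSON found under /opt/keycloak/data/import at startup
docker run -v /opt/local/keycloak:/opt/keycloak/data/import \
  quay.io/keycloak/keycloak:latest start --import-realm
```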
roles/docker_host/vars/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
docker_host_package_common_dependencies:
  - nfs-common
  - firmware-misc-nonfree
  - linux-image-amd64

apt_lock_files:
  - /var/lib/dpkg/lock
  - /var/lib/dpkg/lock-frontend
  - /var/cache/apt/archives/lock
@@ -11,11 +11,11 @@
    dest: /tmp/k3s_install.sh
    mode: "0755"

- name: Install K3s on the secondary servers
- name: Install K3s on agent
  when: not k3s_status.stat.exists
  ansible.builtin.command: |
    /tmp/k3s_install.sh
  environment:
    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
    K3S_TOKEN: "{{ k3s_token }}"
  become: true
@@ -2,7 +2,7 @@
- name: Template the nginx config file with dynamic upstreams
  ansible.builtin.template:
    src: templates/nginx.conf.j2
    dest: "{{ nginx_config_path }}"
    dest: "{{ k3s_loadbalancer_nginx_config_path }}"
    owner: root
    group: root
    mode: "0644"
@@ -10,7 +10,7 @@
  notify:
    - Restart nginx
  vars:
    k3s_server_ips: "{{ k3s.server.ips }}"
    k3s_server_ips: "{{ k3s_primary_server_ip }}"

- name: Enable nginx
  ansible.builtin.systemd:
roles/k3s_loadbalancer/tasks/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Installation
  ansible.builtin.include_tasks: installation.yml
- name: Configure
  ansible.builtin.include_tasks: configuration.yml

- name: Setup DNS on Netcup
  community.general.netcup_dns:
    api_key: "{{ netcup_api_key }}"
    api_password: "{{ netcup_api_password }}"
    customer_id: "{{ netcup_customer_id }}"
    domain: "{{ domain }}"
    name: "k3s"
    type: "A"
    value: "{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}"
  delegate_to: localhost
roles/k3s_loadbalancer/templates/nginx.conf.j2 (new file, 87 lines)
@@ -0,0 +1,87 @@
include /etc/nginx/modules-enabled/*.conf;

events {}

stream {
  # TCP Load Balancing for the K3s API
  upstream k3s_servers {
{% for ip in k3s_server_ips %}
    server {{ ip }}:{{ k3s.loadbalancer.default_port }};
{% endfor %}
  }

  server {
    listen {{k3s.loadbalancer.default_port}};
    proxy_pass k3s_servers;
  }

  upstream dns_servers {
{% for ip in k3s_server_ips %}
    server {{ ip }}:53;
{% endfor %}
  }

  server {
    listen 53 udp;
    proxy_pass dns_servers;
  }
}

# http {
#   upstream k3s_servers_http {
#     least_conn;
# {% for ip in k3s_server_ips %}
#     server {{ ip }}:80;
# {% endfor %}
#   }
#
#   upstream k3s_servers_https {
#     least_conn;
# {% for ip in k3s_server_ips %}
#     server {{ ip }}:443;
# {% endfor %}
#   }
#
#   server {
#     listen 80;
#
#     location / {
#       proxy_pass http://k3s_servers_http;
#       proxy_set_header Host $http_host;
#       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#       proxy_set_header X-Forwarded-Proto http;
#     }
#   }
#
#   server {
#     listen 443 ssl;
#
#     server_name staging.k3s.seyshiro.de *.staging.k3s.seyshiro.de;
#
#     ssl_certificate /etc/nginx/ssl/staging_tls.crt;
#     ssl_certificate_key /etc/nginx/ssl/staging_tls.key;
#
#     location / {
#       proxy_pass https://k3s_servers_https;
#       proxy_set_header Host $host;
#       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#       proxy_set_header X-Forwarded-Proto https;
#     }
#   }
#
#   server {
#     listen 443 ssl;
#
#     server_name k3s.seyshiro.de *.k3s.seyshiro.de;
#
#     ssl_certificate /etc/nginx/ssl/production_tls.crt;
#     ssl_certificate_key /etc/nginx/ssl/production_tls.key;
#
#     location / {
#       proxy_pass https://k3s_servers_https;
#       proxy_set_header Host $host;
#       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#       proxy_set_header X-Forwarded-Proto https;
#     }
#   }
# }
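Since the `stream` block proxies raw TCP/UDP rather than HTTP, a render-test-reload cycle catches most template mistakes; a sketch, run on the load balancer:

```sh
# Syntax-check the deployed config
sudo nginx -t

# Reload without dropping live connections
sudo systemctl reload nginx

# Probe the proxied K3s API port; an HTTP 401 still proves the TCP path works
curl -k https://localhost:6443/
```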
roles/k3s_loadbalancer/vars/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
k3s_loadbalancer_nginx_config_path: "/etc/nginx/nginx.conf"

domain: "{{ internal_domain }}"
roles/k3s_server/tasks/create_kubeconfig.yml (new file, 87 lines)
@@ -0,0 +1,87 @@
---
- name: Slurp original k3s.yaml from primary K3s server
  ansible.builtin.slurp:
    src: /etc/rancher/k3s/k3s.yaml
  register: original_k3s_kubeconfig_slurp
  become: true

- name: Parse original k3s.yaml content to extract cert data
  ansible.builtin.set_fact:
    original_parsed_k3s_kubeconfig: "{{ original_k3s_kubeconfig_slurp.content | b64decode | from_yaml }}"
  delegate_to: localhost
  run_once: true

- name: Set facts for certificate and key data needed by the template
  ansible.builtin.set_fact:
    k3s_server_ca_data: "{{ original_parsed_k3s_kubeconfig.clusters[0].cluster['certificate-authority-data'] }}"
    k3s_client_cert_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-certificate-data'] }}"
    k3s_client_key_data: "{{ original_parsed_k3s_kubeconfig.users[0].user['client-key-data'] }}"
  delegate_to: localhost
  run_once: true

- name: Decode and save K3s Server CA certificate
  ansible.builtin.copy:
    content: "{{ k3s_server_ca_data | b64decode }}"
    dest: "/tmp/k3s-ca.crt"
    mode: "0644"
  delegate_to: localhost
  become: false

- name: Decode and save K3s Client certificate
  ansible.builtin.copy:
    content: "{{ k3s_client_cert_data | b64decode }}"
    dest: "/tmp/k3s-client.crt"
    mode: "0644"
  delegate_to: localhost
  become: false

- name: Decode and save K3s Client key
  ansible.builtin.copy:
    content: "{{ k3s_client_key_data | b64decode }}"
    dest: "/tmp/k3s-client.key"
    mode: "0600"
  delegate_to: localhost
  become: false

- name: Add K3s cluster to kubeconfig
  ansible.builtin.command: >
    kubectl config set-cluster "{{ k3s_cluster_name }}"
    --server="https://{{ k3s_server_name }}:6443"
    --certificate-authority=/tmp/k3s-ca.crt
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false

- name: Add K3s user credentials to kubeconfig
  ansible.builtin.command: >
    kubectl config set-credentials "{{ k3s_user_name }}"
    --client-certificate=/tmp/k3s-client.crt
    --client-key=/tmp/k3s-client.key
    --embed-certs=true
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false

- name: Add K3s context to kubeconfig
  ansible.builtin.command: >
    kubectl config set-context "{{ k3s_context_name }}"
    --cluster="{{ k3s_cluster_name }}"
    --user="{{ k3s_user_name }}"
  environment:
    KUBECONFIG: "{{ ansible_env.HOME }}/.kube/config"
  delegate_to: localhost
  become: false

- name: Clean up temporary certificate and key files
  ansible.builtin.file:
    path: "{{ item }}"
    state: absent
  loop:
    - "/tmp/k3s-ca.crt"
    - "/tmp/k3s-client.crt"
    - "/tmp/k3s-client.key"
  delegate_to: localhost
  become: false
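create_kubeconfig.yml references several naming variables (k3s_cluster_name, k3s_user_name, k3s_context_name, k3s_server_name) that are not defined anywhere in this diff; presumably they sit in role defaults or group_vars. A hypothetical sketch:

```yaml
# Hypothetical values; k3s_server_name must resolve to the API endpoint
# and match one of the --tls-san entries passed to the servers.
k3s_cluster_name: "k3s-homelab"
k3s_user_name: "k3s-admin"
k3s_context_name: "k3s-homelab"
k3s_server_name: "k3s.seyshiro.de"
```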
@@ -1,58 +1,26 @@
 ---
-- name: See if k3s file exists
-  ansible.builtin.stat:
-    path: /usr/local/bin/k3s
-  register: k3s_status
-
 - name: Download K3s install script to /tmp/
-  when: not k3s_status.stat.exists
   ansible.builtin.get_url:
     url: https://get.k3s.io
     dest: /tmp/k3s_install.sh
     mode: "0755"
 
 - name: Install K3s server with node taint and TLS SAN
-  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
+  when: (ansible_default_ipv4.address == k3s_primary_server_ip)
   ansible.builtin.command: |
     /tmp/k3s_install.sh server \
       --node-taint CriticalAddonsOnly=true:NoExecute \
-      --tls-san {{ k3s.loadbalancer.ip }}
+      --tls-san {{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }} \
+      --tls-san {{ k3s_server_name }}
   environment:
     K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
   become: true
   async: 300
   poll: 0
   register: k3s_primary_install
 
 - name: Wait for K3s to be installed
-  when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
   ansible.builtin.async_status:
     jid: "{{ k3s_primary_install.ansible_job_id }}"
   register: k3s_primary_install_status
   until: k3s_primary_install_status.finished
   retries: 60
   delay: 5
   become: true
 
-- name: Get K3s token from the first server
-  when: host.ip == k3s.server.ips[0]
-  ansible.builtin.slurp:
-    src: /var/lib/rancher/k3s/server/node-token
-  register: k3s_token
-  become: true
-
-- name: Set fact on k3s.server.ips[0]
-  when: host.ip == k3s.server.ips[0]
-  ansible.builtin.set_fact:
-    k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"
-
 - name: Install K3s on the secondary servers
-  when: (host.ip != k3s.server.ips[0] and (not k3s_status.stat.exists))
+  when: (ansible_default_ipv4.address != k3s_primary_server_ip)
   ansible.builtin.command: |
     /tmp/k3s_install.sh server \
       --node-taint CriticalAddonsOnly=true:NoExecute \
       --tls-san {{ k3s.loadbalancer.ip }}
   environment:
     K3S_DATASTORE_ENDPOINT: "{{ k3s_db_connection_string }}"
-    K3S_TOKEN: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
+    K3S_TOKEN: "{{ k3s_token }}"
   become: true
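K3S_DATASTORE_ENDPOINT takes a standard K3s external-datastore connection string (PostgreSQL, MySQL, or etcd). The diff never shows k3s_db_connection_string itself; a hypothetical vaulted definition for a PostgreSQL backend:

```yaml
# Placeholder credentials and host; format per the K3s HA external-DB docs
k3s_db_connection_string: "postgres://k3s:CHANGE_ME@192.168.20.5:5432/k3s"
```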
@@ -1,2 +1,21 @@
 ---
-
+- name: See if k3s file exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/k3s
+  register: k3s_status
+
+- ansible.builtin.include_tasks: installation.yml
+  when: not k3s_status.stat.exists
+
+- ansible.builtin.include_tasks: create_kubeconfig.yml
+  when: ansible_default_ipv4.address == k3s_primary_server_ip
+
+- name: Check if k3s token vault file already exists
+  ansible.builtin.stat:
+    path: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
+  register: k3s_vault_file_stat
+  delegate_to: localhost
+  run_once: true
+
+- ansible.builtin.include_tasks: pull_token.yml
+  when: not k3s_vault_file_stat.stat.exists
roles/k3s_server/tasks/pull_token.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
- name: Get K3s token from the first server
  when:
    - ansible_default_ipv4.address == k3s_primary_server_ip
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: k3s_token
  become: true

- name: Set fact on k3s_primary_server_ip
  ansible.builtin.set_fact:
    k3s_token: "{{ k3s_token['content'] | b64decode | trim }}"

- name: Write K3s token to local file for encryption
  ansible.builtin.copy:
    content: |
      k3s_token: "{{ k3s_token }}"
    dest: "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
    mode: "0600"
  delegate_to: localhost
  run_once: true

- name: Encrypt k3s token
  ansible.builtin.shell: cd ../; ansible-vault encrypt "{{ playbook_dir }}/{{ k3s_server_token_vault_file }}"
  delegate_to: localhost
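On later runs the encrypted file can be loaded like any other vaulted vars file, so agents and secondary servers pick up k3s_token without touching the primary again. A sketch (the play hosts and role name are assumptions):

```yaml
# Hypothetical play snippet; run with --ask-vault-pass or a vault password file
- hosts: k3s_agents
  vars_files:
    - vars/group_vars/k3s/secrets_token.yml
  roles:
    - k3s_agent
```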
roles/k3s_server/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
k3s_server_token_vault_file: ../vars/group_vars/k3s/secrets_token.yml
@@ -18,6 +18,6 @@
       --node-taint storage=true:NoExecute \
       --node-label longhorn=true
   environment:
-    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
+    K3S_URL: "https://{{ hostvars['k3s-loadbalancer'].ansible_default_ipv4.address }}:{{ k3s.loadbalancer.default_port }}"
     K3S_TOKEN: "{{ k3s_token }}"
   become: true
roles/kubernetes_argocd/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
argocd_version: stable
argocd_namespace: argocd
argocd_repo: "https://raw.githubusercontent.com/argoproj/argo-cd/refs/tags/{{ argocd_version }}/manifests/ha/install.yaml"
roles/kubernetes_argocd/files/argocd-cmd-params-cm.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmd-params-cm
  labels:
    app.kubernetes.io/name: argocd-cmd-params-cm
    app.kubernetes.io/part-of: argocd
data:
  redis.server: "argocd-redis-ha-haproxy:6379"
  server.insecure: "true"
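For context: in the HA manifests the API server reaches Redis through the argocd-redis-ha-haproxy service, which is what redis.server pins here, and server.insecure: "true" presumably makes argocd-server speak plain HTTP so that TLS can terminate at the Traefik ingress defined later in this role.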
roles/kubernetes_argocd/tasks/main.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
- name: Install ArgoCD
  block:
    - name: Create ArgoCD namespace
      kubernetes.core.k8s:
        name: "{{ argocd_namespace }}"
        api_version: v1
        kind: Namespace
        state: present

    - name: Apply ArgoCD manifests
      kubernetes.core.k8s:
        src: "{{ argocd_repo }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10

    - name: Wait for ArgoCD server to be ready
      kubernetes.core.k8s_info:
        api_version: apps/v1
        kind: Deployment
        name: argocd-server
        namespace: "{{ argocd_namespace }}"
      register: rollout_status
      until: rollout_status.resources[0].status.readyReplicas is defined and rollout_status.resources[0].status.readyReplicas == rollout_status.resources[0].spec.replicas
      retries: 30
      delay: 10

    - name: Apply ArgoCD Ingress
      kubernetes.core.k8s:
        definition: "{{ lookup('ansible.builtin.template', 'ingress.yml.j2') | from_yaml }}"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10

    - name: Apply ArgoCD CM
      kubernetes.core.k8s:
        src: "files/argocd-cmd-params-cm.yaml"
        state: present
        namespace: "{{ argocd_namespace }}"
      register: apply_manifests
      until: apply_manifests is not failed
      retries: 5
      delay: 10
roles/kubernetes_argocd/templates/app.yaml.j2 (new file, 19 lines)
@@ -0,0 +1,19 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: "{{ item.name }}"
  namespace: "{{ argocd_namespace }}"
spec:
  project: default
  source:
    repoURL: "{{ item.repo_url }}"
    targetRevision: "{{ item.target_revision }}"
    path: "{{ item.path }}"
  destination:
    server: "{{ item.destination_server }}"
    namespace: "{{ item.destination_namespace }}"
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
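app.yaml.j2 is keyed entirely off item.*, so it is presumably meant to be rendered in a loop over a list of application definitions. A hypothetical consumer (the argocd_applications variable and its values are invented for illustration):

```yaml
- name: Create ArgoCD applications
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('ansible.builtin.template', 'app.yaml.j2') | from_yaml }}"
  loop: "{{ argocd_applications }}"

# where, for example:
# argocd_applications:
#   - name: demo
#     repo_url: https://github.com/example/gitops.git
#     target_revision: main
#     path: apps/demo
#     destination_server: https://kubernetes.default.svc
#     destination_namespace: demo
```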
roles/kubernetes_argocd/templates/ingress.yml.j2 (new file, 27 lines)
@@ -0,0 +1,27 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  namespace: argocd
  annotations:
    kubernetes.io/ingress.class: traefik
    cert-manager.io/cluster-issuer: "{{ argocd_cert_resolver }}"
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  rules:
    - host: {{ argocd_hostname }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  number: 80
  tls:
    - hosts:
        - {{ argocd_hostname }}
      secretName: k3s-seyshiro-de-tls
roles/kubernetes_cert_manager/defaults/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
cert_manager_version: "v1.18.2"
cert_manager_email: "mail@example.com"
cert_manager_manifest: "https://github.com/cert-manager/cert-manager/releases/download/{{ cert_manager_version }}/cert-manager.yaml"
cert_manager_issuer_name: "letsencrypt-prod"
cert_manager_issuer_env: "staging"
roles/kubernetes_cert_manager/tasks/main.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
---
- name: Ensure cert-manager namespace exists
  kubernetes.core.k8s:
    name: cert-manager
    api_version: v1
    kind: Namespace
    state: present
  tags:
    - cert_manager
    - namespace

- name: Create netcup-secret
  kubernetes.core.k8s:
    namespace: cert-manager
    definition: "{{ lookup('ansible.builtin.template', 'netcup.yml.j2') | from_yaml }}"

- name: Add a repository
  kubernetes.core.helm_repository:
    name: cert-manager-webhook-netcup
    repo_url: https://aellwein.github.io/cert-manager-webhook-netcup/charts/

- name: Install NetCup Webhook
  kubernetes.core.helm:
    name: my-cert-manager-webhook-netcup
    chart_ref: cert-manager-webhook-netcup/cert-manager-webhook-netcup
    release_namespace: cert-manager
    create_namespace: true

- name: Download cert-manager manifest
  ansible.builtin.get_url:
    url: "{{ cert_manager_manifest }}"
    dest: "/tmp/cert-manager.yaml"
    mode: "0644"
    validate_certs: true
  tags:
    - cert_manager
    - download

- name: Apply cert-manager core manifests
  kubernetes.core.k8s:
    src: "/tmp/cert-manager.yaml"
    state: present
  tags:
    - cert_manager
    - apply_manifest

- name: Wait for cert-manager deployments to be ready
  kubernetes.core.k8s_info:
    api_version: apps/v1
    kind: Deployment
    namespace: cert-manager
    name: "{{ item }}"
    wait: true
    wait_timeout: 300
  loop:
    - cert-manager
    - cert-manager-cainjector
    - cert-manager-webhook
  tags:
    - cert_manager
    - wait_ready

- name: Create Let's Encrypt ClusterIssuer
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('ansible.builtin.template', 'clusterissuer.yml.j2') | from_yaml }}"
  tags:
    - cert_manager
    - cluster_issuer

- name: Create Let's Encrypt Certificate
  kubernetes.core.k8s:
    state: present
    definition: "{{ lookup('ansible.builtin.template', 'certificate.yml.j2') | from_yaml }}"
  tags:
    - cert_manager
    - certificate
roles/kubernetes_cert_manager/templates/certificate.yml.j2 (new file, 16 lines)
@@ -0,0 +1,16 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: k3s-seyshiro-de
  namespace: cert-manager
spec:
  secretName: k3s-seyshiro-de-tls
  issuerRef:
    name: {{ cert_manager_issuer_name }}
    kind: ClusterIssuer
  commonName: "*.k3s.seyshiro.de"
  dnsNames:
    - "k3s.seyshiro.de"
    - "*.k3s.seyshiro.de"
roles/kubernetes_cert_manager/templates/clusterissuer.yml.j2 (new file, 22 lines)
@@ -0,0 +1,22 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: "{{ cert_manager_issuer_name }}"
spec:
  acme:
    server: "{% if cert_manager_issuer_env == 'production' %}https://acme-v02.api.letsencrypt.org/directory{% else %}https://acme-staging-v02.api.letsencrypt.org/directory{% endif %}"
    email: "{{ cert_manager_email }}"
    privateKeySecretRef:
      name: "{{ cert_manager_issuer_name }}-account-key"
    solvers:
      - selector:
          dnsZones:
            - 'k3s.seyshiro.de'
        dns01:
          webhook:
            groupName: com.netcup.webhook
            solverName: netcup
            config:
              secretRef: netcup-secret
              secretNamespace: cert-manager
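Note the interplay with the role defaults above: cert_manager_issuer_name defaults to letsencrypt-prod while cert_manager_issuer_env defaults to staging, so out of the box the rendered issuer hits the staging ACME endpoint despite its name. Flipping to production is a one-variable override, for example:

```yaml
# e.g. in group_vars, once staging issuance has been verified end to end
cert_manager_issuer_env: "production"
```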
roles/kubernetes_cert_manager/templates/netcup.yml.j2 (new file, 11 lines)
@@ -0,0 +1,11 @@
---
apiVersion: v1
kind: Secret
metadata:
  name: netcup-secret
type: Opaque
data:
  customer-number: {{ netcup_customer_id | b64encode }}
  api-key: {{ netcup_api_key | b64encode }}
  api-password: {{ netcup_api_password | b64encode }}
roles/kubernetes_metallb/defaults/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
metallb_version: v0.15.2
metallb_ip_range: "192.168.178.200-192.168.178.220"
metallb_manifest_url: "https://raw.githubusercontent.com/metallb/metallb/{{ metallb_version }}/config/manifests/metallb-native.yaml"
roles/kubernetes_metallb/tasks/main.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
---
- name: Ensure metallb-system namespace exists
  kubernetes.core.k8s:
    name: metallb-system
    api_version: v1
    kind: Namespace
    state: present
  tags:
    - metallb
    - namespace

- name: Download MetalLB manifest
  ansible.builtin.get_url:
    url: "{{ metallb_manifest_url }}"
    dest: "/tmp/metallb.yaml"
    mode: "0644"
    validate_certs: true
  run_once: true
  tags:
    - metallb
    - download

- name: Apply MetalLB core manifests
  kubernetes.core.k8s:
    src: "/tmp/metallb.yaml"
    state: present
    namespace: metallb-system
  tags:
    - metallb
    - apply_manifest

- name: Create IPAddressPool for MetalLB
  kubernetes.core.k8s:
    state: present
    namespace: metallb-system
    definition: "{{ lookup('ansible.builtin.template', 'ipaddresspool.yml.j2') | from_yaml }}"
  tags:
    - metallb
    - ip_pool

- name: Create L2Advertisement for MetalLB
  kubernetes.core.k8s:
    state: present
    namespace: metallb-system
    definition: "{{ lookup('ansible.builtin.template', 'l2advertisement.yml.j2') | from_yaml }}"
  tags:
    - metallb
    - l2_advertisement

- name: Setup DNS on Netcup
  community.general.netcup_dns:
    api_key: "{{ netcup_api_key }}"
    api_password: "{{ netcup_api_password }}"
    customer_id: "{{ netcup_customer_id }}"
    domain: "{{ domain }}"
    name: "{{ service.name }}.k3s"
    type: "A"
    value: "{{ service.ip }}"
  loop: "{{ services }}"
  loop_control:
    loop_var: service
  delegate_to: localhost
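The final DNS task loops over a services list that never appears in this diff; from the fields it references, the expected shape is presumably something like this (names and addresses invented, drawn from the MetalLB pool range):

```yaml
services:
  - name: argocd      # published as argocd.k3s.<domain>
    ip: 192.168.178.200
  - name: traefik
    ip: 192.168.178.201
```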
roles/kubernetes_metallb/templates/ipaddresspool.yml.j2 (new file, 9 lines)
@@ -0,0 +1,9 @@
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
    - "{{ metallb_ip_range }}"
roles/kubernetes_metallb/templates/l2advertisement.yml.j2 (new file, 9 lines)
@@ -0,0 +1,9 @@
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
    - default-pool
roles/kubernetes_nfs/defaults/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
kubernetes_nfs_helm_name: nfs-subdir-external-provisioner
kubernetes_nfs_helm_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
kubernetes_nfs_helm_chart: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner

kubernetes_nfs_server_host: 192.168.20.1
kubernetes_nfs_server_path: /nfs/
roles/kubernetes_nfs/tasks/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
# helm repo add

- name: Add a repository
  kubernetes.core.helm_repository:
    name: "{{ kubernetes_nfs_helm_name }}"
    repo_url: "{{ kubernetes_nfs_helm_url }}"

- name: Install NFS subdir external provisioner
  kubernetes.core.helm:
    name: "{{ kubernetes_nfs_helm_name }}"
    chart_ref: "{{ kubernetes_nfs_helm_chart }}"
    create_namespace: true
    set_values:
      - value: "nfs.server={{ kubernetes_nfs_server_host }}"
      - value: "nfs.path={{ kubernetes_nfs_server_path }}"
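Once the provisioner is up, workloads request NFS-backed volumes through its StorageClass; the chart's default class name is nfs-client (worth verifying with kubectl get storageclass). A minimal sketch:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-nfs-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```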
roles/kubernetes_traefik/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
traefik_dashboard_hostname: "traefik.example.com"
traefik_cert_resolver: "cert_resolver-prod"
roles/kubernetes_traefik/tasks/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
# roles/kubernetes_traefik/tasks/main.yml

- name: "Traefik | Enable dashboard"
  kubernetes.core.k8s:
    template: "helmchartconfig.yaml.j2"
    state: present

- name: "Traefik | Create dashboard ingress"
  kubernetes.core.k8s:
    template: "ingress.yaml.j2"
    state: present
roles/kubernetes_traefik/templates/helmchartconfig.yaml.j2 (new file, 17 lines)
@@ -0,0 +1,17 @@
---
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: traefik
  namespace: kube-system
spec:
  valuesContent: |-
    logs:
      access:
        enabled: true
    ingressRoute:
      dashboard:
        enabled: true
    websecure:
      tls:
        enabled: true
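This works because K3s watches HelmChartConfig objects in kube-system and re-renders its bundled Traefik chart with the supplied valuesContent, so no manual helm upgrade is needed after applying it.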
roles/kubernetes_traefik/templates/ingress.yaml.j2 (new file, 27 lines)
@@ -0,0 +1,27 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
    cert-manager.io/cluster-issuer: {{ traefik_cert_resolver }}
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  rules:
    - host: {{ traefik_dashboard_hostname }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: traefik
                port:
                  number: 8080
  tls:
    - hosts:
        - {{ traefik_dashboard_hostname }}
      secretName: k3s-seyshiro-de-tls
@@ -1,5 +0,0 @@
----
-- name: Installation
-  ansible.builtin.include_tasks: installation.yml
-- name: Configure
-  ansible.builtin.include_tasks: configuration.yml
Some files were not shown because too many files have changed in this diff.