Added working Traefik configuration/labels for containers

Added new host 'mii'
Added Uptime Kuma in Docker

Signed-off-by: TuDatTr <tuan-dat.tran@tudattr.dev>
pull/1/head
TuDatTr 2023-04-13 18:43:32 +02:00
parent f3254d5b79
commit 7cd43fb8a8
25 changed files with 605 additions and 152 deletions


@@ -1,5 +1,9 @@
# TuDatTr IaC
## User
It is expected that a user with sudo privileges exists on the target; for me, the user's name is "tudattr".
You can add such a user with `useradd -m -g sudo -s /bin/bash tudattr`.
Don't forget to set a password for the new user with `passwd tudattr`.
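
For reference, the same bootstrap can be written as an Ansible task. This is a minimal sketch (not part of this repo's roles), assuming you can still connect as root; the plain-text password is a placeholder:

```yaml
# Sketch only: mirrors `useradd -m -g sudo -s /bin/bash tudattr` from above.
# Replace the placeholder password; in practice it belongs in an Ansible vault.
- name: Create the sudo-capable ansible user
  user:
    name: tudattr
    group: sudo                 # primary group, like `-g sudo`
    shell: /bin/bash
    create_home: yes
    password: "{{ 'changeme' | password_hash('sha512') }}"
  become: true
```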
## Backups
Backups for aya01 and raspberry are stored in a Backblaze B2 bucket and encrypted client-side by rclone.
But first we need to create the buckets and provide Ansible with the needed information.
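
That information looks roughly like the following; the variable names here are illustrative, not the ones this repo actually uses:

```yaml
# Illustrative names only; real values go into group_vars, secrets into a vault.
backblaze_account_id: "000xxxxxxxxxxxx"                    # B2 keyID
backblaze_account_key: "{{ vault_backblaze_account_key }}" # B2 applicationKey
backblaze_bucket: "aya01-backup"                           # one bucket per host
rclone_crypt_password: "{{ vault_rclone_crypt_password }}" # client-side encryption key
```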


@@ -14,7 +14,8 @@
- power_management
- role: backblaze
tags:
- backblaze
- backup
- role: docker
tags:
- docker


@@ -4,6 +4,7 @@
user: tudattr
timezone: Europe/Berlin
local_domain: borg.land
remote_domain: tudattr.dev
rclone_config: "/root/.config/rclone/"
puid: 1000
pgid: 1000
@@ -31,13 +32,14 @@ common_packages:
- git
- iperf3
- tmux
- smartmontools
- vim
- curl
- tree
- rsync
- systemd-timesyncd
- neofetch
- build-essential
#
# Docker
@@ -61,24 +63,91 @@ mysql_user: user
aya01_host: "aya01"
aya01_ip: "192.168.20.12"
zoneminder_config: "{{ docker_dir }}/zm/"
#
# ZoneMinder
#
zoneminder_host: "zm"
zoneminder_port: "8081"
zoneminder_root: "{{ docker_dir }}/zm/"
zoneminder_config: "{{ zoneminder_root }}/config/"
zoneminder_log: "{{ zoneminder_root }}/log/"
zoneminder_db: "{{ zoneminder_root }}/db/"
zoneminder_data: "{{ docker_data_dir }}/zm/data/"
#
# Syncthing
#
syncthing_host: "syncthing"
syncthing_port: "8384"
syncthing_data: "{{ docker_data_dir }}/syncthing/"
#
# Softserve
#
softserve_data: "{{ docker_dir }}/softserve/data"
#
# cupsd
#
cupsd_host: "cupsd"
cupsd_port: "631"
cupsd_config: "{{ docker_dir }}/cupsd/"
#
# Uptime Kuma
#
kuma_host: "uptime"
kuma_port: "3001"
kuma_config: "{{ docker_dir }}/kuma/"
#
# pi
#
traefik_etc: "{{ docker_dir }}/traefik/etc-traefik/"
traefik_var: "{{ docker_dir }}/traefik/var-log/"
ddns_updater_data: "{{ docker_dir }}/ddns-updater/data/"
pi_host: "pi"
pi_ip: "192.168.20.11"
#
# Traefik
#
traefik_host: "traefik"
traefik_user_port: "80"
traefik_admin_port: "8080"
traefik_config: "{{ docker_dir }}/traefik/etc-traefik/"
traefik_data: "{{ docker_dir }}/traefik/var-log/"
#
# DynDns Updater
#
ddns_host: "ddns"
ddns_port: "8000"
ddns_data: "{{ docker_dir }}/ddns-updater/data/"
#
# Home Assistant
#
ha_host: "hass"
ha_port: "8123"
ha_config: "{{ docker_dir }}/home-assistant/config/"
pihole_pihole: "{{ docker_dir }}/pihole/etc-pihole/"
#
# pihole
#
pihole_host: "pihole"
pihole_port: "8089"
pihole_config: "{{ docker_dir }}/pihole/etc-pihole/"
pihole_dnsmasq: "{{ docker_dir }}/pihole/etc-dnsmasq.d/"
#
@@ -94,7 +163,6 @@ backblaze_paths:
- "{{ docker_compose_dir }}"
- "{{ docker_dir }}"
#
# samba
#
@@ -112,6 +180,7 @@ smb_user: "smbuser"
#
# prometheus/grafana
#
prm_user: "prometheus"
exporter_dir: "{{ docker_dir }}/exporter/"
@@ -139,3 +208,9 @@ netdata_port: "19999"
netdata_config: "{{ docker_dir }}/netdata/"
netdata_lib: "{{ docker_data_dir }}/netdata/lib/"
netdata_cache: "{{ docker_data_dir }}/netdata/cache"
#
# swag
#
swag_port: "443"
swag_config: "{{ docker_dir }}/swag/config/"

host_vars/mii.yml Normal file

@@ -0,0 +1,5 @@
ansible_user: "{{ user }}"
ansible_host: 202.61.207.139
ansible_port: 22
ansible_ssh_private_key_file: /mnt/veracrypt1/genesis
ansible_become_pass: '{{ vault_mii_tudattr_password }}'

mii.yml Normal file

@@ -0,0 +1,11 @@
---
- name: Set up Servers
hosts: vps
gather_facts: yes
roles:
- role: common
tags:
- common
- role: docker
tags:
- docker


@@ -3,3 +3,6 @@ aya01
[raspberry]
pi
[vps]
mii


@@ -12,21 +12,33 @@
tags:
- syncthing
- include_tasks: grafana.yml
tags:
- grafana
#- include_tasks: grafana.yml
# tags:
# - grafana
- include_tasks: softserve.yml
tags:
- softserve
- include_tasks: prometheus.yml
#- include_tasks: prometheus.yml
# tags:
# - prometheus
#
#- include_tasks: netdata.yaml
# tags:
# - netdata
#
- include_tasks: cupsd.yml
tags:
- prometheus
- cupsd
- include_tasks: netdata.yaml
- include_tasks: kuma.yml
tags:
- netdata
- kuma
- include_tasks: traefik.yml
tags:
- traefik
- name: Copy the compose file
template:


@@ -0,0 +1,19 @@
---
- name: Create cupsd-config directory
file:
path: "{{ item }}"
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
- "{{ cupsd_config }}"
become: true
- name: Copy default config
template:
owner: "{{ puid }}"
src: "templates/aya01/cupsd/cupsd.conf"
dest: "{{ cupsd_config }}/cupsd.conf"
mode: '660'
become: true


@@ -0,0 +1,30 @@
---
- name: Create zoneminder user
user:
name: zm
uid: 911
shell: /bin/false
become: true
- name: Create Zoneminder config directory
file:
path: "{{ item }}"
owner: 911
group: 911
mode: '700'
state: directory
loop:
- "{{ zoneminder_config }}"
become: true
- name: Create Zoneminder data directory
file:
path: "{{ item }}"
owner: 911
group: 911
mode: '755'
state: directory
loop:
- "{{ zoneminder_data }}"
become: true


@@ -0,0 +1,11 @@
---
- name: Create kuma-config directory
file:
path: "{{ item }}"
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
- "{{ kuma_config }}"
become: true


@@ -9,3 +9,7 @@
when: inventory_hostname == "aya01"
tags:
- reload_compose
- include_tasks: mii_compose.yml
when: inventory_hostname == "mii"
tags:
- reload_compose


@@ -0,0 +1,18 @@
---
- include_tasks: swag.yml
tags:
- swag
- name: Copy the compose file
template:
src: templates/mii/compose.yaml
dest: "{{ docker_compose_dir }}/compose.yaml"
tags:
- reload_compose
- name: Run docker compose
shell:
cmd: "docker compose up -d"
chdir: "{{ docker_compose_dir }}"
tags:
- reload_compose


@@ -3,21 +3,8 @@
- name: Create traefik-config directory
file:
path: "{{ item }}"
owner: 1000
mode: '700'
state: directory
owner: "{{ puid }}"
group: "{{ pgid }}"
state: directory
loop:
- "{{ docker_dir }}/traefik/etc-traefik/"
- "{{ docker_dir }}/traefik/var-log/"
- name: Copy traefik-config
template:
owner: 1000
src: "templates/pi/{{ item }}"
dest: "{{ docker_dir }}/{{ item }}"
mode: '400'
loop:
- "traefik/etc-traefik/traefik.yml"
- "traefik/var-log/access.log"
- "traefik/var-log/traefik.log"
- "{{ swag_config }}"


@@ -1,5 +1,20 @@
version: '3'
services:
traefik:
container_name: traefik
image: traefik:latest
restart: unless-stopped
networks:
net: {}
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "{{ traefik_config }}:/etc/traefik/"
- "{{ traefik_data }}:/var/log/"
ports:
- "{{ traefik_user_port}}:80"
- "{{ traefik_admin_port}}:8080"
db:
container_name: zoneminder_db
image: mariadb
@@ -8,9 +23,9 @@ services:
- zoneminder
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ zoneminder_config}}/db:/var/lib/mysql"
- "{{ zoneminder_db }}:/var/lib/mysql"
environment:
- "MYSQL_DATABASE=zm"
- "MYSQL_DATABASE={{ zoneminder_host }}"
- "MYSQL_ROOT_PASSWORD={{ vault_mysql_root_password }}"
- "MYSQL_USER={{ mysql_user }}"
- "MYSQL_PASSWORD={{ vault_mysql_user_password }}"
@@ -28,24 +43,28 @@ services:
- zoneminder
- net
ports:
- 80:80
- "{{ zoneminder_port }}:80"
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ zoneminder_data }}:/data"
- "{{ zoneminder_config }}/config:/config"
- "{{ zoneminder_config }}/log:/log"
- "{{ zoneminder_config }}:/config"
- "{{ zoneminder_log}}:/log"
- type: tmpfs
target: /dev/shm
tmpfs:
size: 1000000000
environment:
- "MYSQL_DATABASE=zm"
- "MYSQL_DATABASE={{ zoneminder_host }}"
- "MYSQL_ROOT_PASSWORD={{ vault_mysql_root_password }}"
- "MYSQL_USER={{ mysql_user }}"
- "MYSQL_PASSWORD={{ vault_mysql_user_password }}"
- "MAX_LOG_SIZE_BYTES=1000000"
- "MAX_LOG_NUMBER=20"
- "TZ=Europe/Berlin"
labels:
- "traefik.enable=true"
- "traefik.http.routers.zoneminder.rule=Host(`{{ zoneminder_host}}.{{ aya01_host }}.{{ local_domain }}`)"
- "traefik.http.services.zoneminder.loadbalancer.server.port={{ 80 }}"
pihole:
container_name: pihole
@@ -57,10 +76,10 @@ services:
- "53:53/tcp"
- "53:53/udp"
- "67:67/udp"
- "8089:80/tcp"
- "{{ pihole_port }}:80/tcp"
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ pihole_pihole }}:/etc/pihole/"
- "{{ pihole_config }}:/etc/pihole/"
- "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
environment:
- "WEBPASSWORD={{ vault_aya01_pihole_password }}"
@@ -73,15 +92,19 @@ services:
- 1.1.1.1
cap_add:
- NET_ADMIN
labels:
- "traefik.enable=true"
- "traefik.http.routers.pihole.rule=Host(`{{ pihole_host }}.{{ aya01_host }}.{{ local_domain }}`)"
- "traefik.http.services.pihole.loadbalancer.server.port={{ 80 }}"
syncthing:
container_name: syncthing
image: syncthing/syncthing
restart: unless-stopped
networks:
- net
ports:
- 8384:8384 # Web UI
- "{{ syncthing_port }}:8384" # Web UI
- 22000:22000/tcp # TCP file transfers
- 22000:22000/udp # QUIC file transfers
- 21027:21027/udp # Receive local discovery broadcasts
@@ -91,22 +114,26 @@ services:
- "PUID={{ puid }}"
- "PGID={{ pgid }}"
hostname: syncthing
labels:
- "traefik.enable=true"
- "traefik.http.routers.syncthing.rule=Host(`{{ syncthing_host }}.{{ aya01_host }}.{{ local_domain }}`)"
- "traefik.http.services.syncthing.loadbalancer.server.port={{ syncthing_port }}"
grafana:
container_name: grafana
image: grafana/grafana-oss
restart: unless-stopped
user: "{{ puid }}:{{ pgid }}"
networks:
- net
ports:
- 3000:3000
volumes:
- "{{ grafana_data }}:/var/lib/grafana/"
- "{{ grafana_log }}:/var/log/grafana/"
environment:
- "GF_LOG_MODE=console file"
hostname: grafana
# grafana:
# container_name: grafana
# image: grafana/grafana-oss
# restart: unless-stopped
# user: "{{ puid }}:{{ pgid }}"
# networks:
# - net
# ports:
# - 3000:3000
# volumes:
# - "{{ grafana_data }}:/var/lib/grafana/"
# - "{{ grafana_log }}:/var/log/grafana/"
# environment:
# - "GF_LOG_MODE=console file"
# hostname: grafana
soft-serve:
container_name: soft-serve
@@ -115,60 +142,90 @@ services:
networks:
- net
ports:
- 23231:23231
- 23231:23231 # ssh
volumes:
- "{{ softserve_data }}:/soft-serve"
prometheus:
container_name: prometheus
image: prom/prometheus
# prometheus:
# container_name: prometheus
# image: prom/prometheus
# restart: unless-stopped
# networks:
# - net
# ports:
# - "{{ prm_port }}:9090"
# volumes:
# - "{{ prm_config }}:/etc/prometheus"
# exporter_mikrotik:
# container_name: exporter_mikrotik
# image: "nshttpd/mikrotik-exporter:{{ e_mikrotik_version }}"
# restart: unless-stopped
# user: "{{ puid }}:{{ pgid }}"
# networks:
# - net
# ports:
# - "{{ e_mikrotik_port }}:9436"
# volumes:
# - "{{ e_mikrotik_config }}:/config"
# environment:
# - "CONFIG_FILE=/config/config.yml"
# netdata:
# container_name: netdata
# image: netdata/netdata
# restart: unless-stopped
# networks:
# - net
# ports:
# - "{{netdata_port}}:19999"
# volumes:
# - "{{netdata_config}}:/etc/netdata"
# - "{{netdata_lib}}:/var/lib/netdata"
# - "{{netdata_cache}}:/var/cache/netdata"
# - /etc/passwd:/host/etc/passwd:ro
# - /etc/group:/host/etc/group:ro
# - /proc:/host/proc:ro
# - /sys:/host/sys:ro
# - /etc/os-release:/host/etc/os-release:ro
# environment:
# - "DO_NOT_TRACK=1"
# cap_add:
# - SYS_PTRACE
# security_opt:
# - apparmor:unconfined
# hostname: "{{ aya01_host }}"
cupsd:
container_name: cupsd
image: olbat/cupsd
restart: unless-stopped
networks:
- net
ports:
- "{{ prm_port }}:9090"
- "{{cupsd_port}}:631"
volumes:
- "{{ prm_config }}:/etc/prometheus"
- /var/run/dbus:/var/run/dbus
- "{{cupsd_config}}:/etc/cups"
labels:
- "traefik.enable=true"
- "traefik.http.routers.cupsd.rule=Host(`{{ cupsd_host }}.{{ aya01_host }}.{{local_domain}}`)"
- "traefik.http.services.cupsd.loadbalancer.server.port={{ cupsd_port }}"
exporter_mikrotik:
container_name: exporter_mikrotik
image: "nshttpd/mikrotik-exporter:{{ e_mikrotik_version }}"
restart: unless-stopped
user: "{{ puid }}:{{ pgid }}"
kuma:
container_name: kuma
image: louislam/uptime-kuma:1
restart: always
networks:
- net
ports:
- "{{ e_mikrotik_port }}:9436"
- "{{ kuma_port }}:3001"
volumes:
- "{{ e_mikrotik_config }}:/config"
environment:
- "CONFIG_FILE=/config/config.yml"
netdata:
container_name: netdata
image: netdata/netdata
restart: unless-stopped
networks:
- net
ports:
- "{{netdata_port}}:19999"
volumes:
- "{{netdata_config}}:/etc/netdata"
- "{{netdata_lib}}:/var/lib/netdata"
- "{{netdata_cache}}:/var/cache/netdata"
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /etc/os-release:/host/etc/os-release:ro
environment:
- "DO_NOT_TRACK=1"
cap_add:
- SYS_PTRACE
security_opt:
- apparmor:unconfined
hostname: "{{ aya01_host }}"
- "{{ kuma_config }}:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.kuma.rule=Host(`{{ kuma_host }}.{{ aya01_host }}.{{local_domain}}`)"
- "traefik.http.services.kuma.loadbalancer.server.port={{ kuma_port }}"
networks:
zoneminder:


@@ -0,0 +1,196 @@
#
# Configuration file for the CUPS scheduler. See "man cupsd.conf" for a
# complete description of this file.
#
# Log general information in error_log - change "warn" to "debug"
# for troubleshooting...
LogLevel warn
PageLogFormat
ServerAlias *
# Specifies the maximum size of the log files before they are rotated. The value "0" disables log rotation.
MaxLogSize 0
# Default error policy for printers
ErrorPolicy retry-job
# Allow remote access
Listen *:631
# Show shared printers on the local network.
Browsing Yes
BrowseLocalProtocols dnssd
# Default authentication type, when authentication is required...
DefaultAuthType Basic
DefaultEncryption IfRequested
# Web interface setting...
WebInterface Yes
# Timeout after cupsd exits if idle (applied only if cupsd runs on-demand - with -l)
IdleExitTimeout 60
# Restrict access to the server...
<Location />
Order allow,deny
Allow all
</Location>
# Restrict access to the admin pages...
<Location /admin>
Order allow,deny
Allow all
</Location>
# Restrict access to configuration files...
<Location /admin/conf>
AuthType Default
Require user @SYSTEM
Order allow,deny
Allow all
</Location>
# Restrict access to log files...
<Location /admin/log>
AuthType Default
Require user @SYSTEM
Order allow,deny
Allow all
</Location>
# Set the default printer/job policies...
<Policy default>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
Order deny,allow
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
</Limit>
</Policy>
# Set the authenticated printer/job policies...
<Policy authenticated>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
AuthType Default
Order deny,allow
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
AuthType Default
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
AuthType Default
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
</Limit>
</Policy>
# Set the kerberized printer/job policies...
<Policy kerberos>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
AuthType Negotiate
Order deny,allow
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
AuthType Negotiate
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
AuthType Negotiate
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
</Limit>
</Policy>


@@ -9,9 +9,7 @@ entryPoints:
# Docker configuration backend
providers:
docker:
exposedByDefault: false
network: compose_net
defaultRule: "Host(`{{ '{{' }} trimPrefix `/` .Name {{ '}}' }}.{{ local_domain }}`)"
endpoint: "unix:///var/run/docker.sock"
# API and dashboard configuration
api:


@@ -0,0 +1,38 @@
version: '3'
services:
swag:
image: lscr.io/linuxserver/swag:latest
container_name: swag
networks:
net: {}
cap_add:
- NET_ADMIN
environment:
- PUID={{ puid }}
- PGID={{ pgid }}
- TZ={{ timezone }}
- URL={{ remote_domain }}
- VALIDATION=http
- SUBDOMAINS=www, #optional
- CERTPROVIDER= #optional
- DNSPLUGIN=cloudflare #optional
- PROPAGATION= #optional
- EMAIL= #optional
- ONLY_SUBDOMAINS=false #optional
- EXTRA_DOMAINS= #optional
- STAGING=false #optional
volumes:
- "{{ swag_config }}:/config"
ports:
- "{{ swag_port }}:443"
- 80:80 #optional
restart: unless-stopped
networks:
net:
driver: bridge
ipam:
# driver: default
config:
- subnet: 172.16.69.0/24
gateway: 172.16.69.1


@@ -3,42 +3,37 @@ services:
traefik:
container_name: traefik
image: traefik:latest
restart: unless-stopped
restart: always
networks:
net: {}
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "{{ traefik_etc }}/traefik.yml:/etc/traefik/traefik.yml"
- "{{ traefik_var }}/traefik.log:/var/log/traefik.log"
- "{{ traefik_var }}/access.log:/var/log/traefik.log"
- "{{ traefik_config }}:/etc/traefik/"
- "{{ traefik_data }}:/var/log/"
ports:
- 80:80
- 8080:8080
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.rule=Host(`traefik.{{local_domain}}`)"
# - "traefik.http.routers.traefik.entrypoints=web"
# - "traefik.http.services.traefik.loadbalancer.server.port=80"
- "{{ traefik_user_port }}:80"
- "{{ traefik_admin_port }}:8080"
ddns-updater:
container_name: ddns-updater
image: "ghcr.io/qdm12/ddns-updater"
restart: unless-stopped
restart: always
networks:
net: {}
volumes:
- "{{ ddns_updater_data }}:/updater/data/"
- "{{ ddns_data }}:/updater/data/"
ports:
- 8000:8000/tcp
- "{{ ddns_port }}:8000/tcp"
labels:
- "traefik.enable=true"
- "traefik.http.routers.homeassistant.rule=Host(`ddns.{{local_domain}}`)"
- "traefik.http.routers.ddns-updater.rule=Host(`{{ ddns_host }}.{{ pi_host }}.{{local_domain}}`)"
- "traefik.http.services.ddns-updater.loadbalancer.server.port={{ ddns_port }}"
homeassistant:
container_name: homeassistant
image: "ghcr.io/home-assistant/home-assistant:stable"
restart: unless-stopped
restart: always
networks:
net: {}
volumes:
@@ -46,27 +41,26 @@ services:
- "{{ ha_config }}:/config/"
privileged: true
ports:
- 8123:8123
- "{{ ha_port }}:8123"
- 4357:4357
- 5683:5683
- 5683:5683/udp
labels:
- "traefik.enable=true"
- "traefik.http.routers.homeassistant.rule=Host(`hass.{{local_domain}}`)"
# - "traefik.http.routers.homeassistant.entrypoints=web"
# - "traefik.http.services.homeassistant.loadbalancer.server.port=8123"
- "traefik.http.routers.homeassistant.rule=Host(`{{ ha_host }}.{{ pi_host }}.{{ local_domain }}`)"
- "traefik.http.services.homeassistant.loadbalancer.server.port={{ ha_port }}"
pihole:
container_name: pihole
image: pihole/pihole:latest
restart: unless-stopped
restart: always
networks:
net: {}
ports:
- "53:53/tcp"
- "53:53/udp"
- "67:67/udp"
- "8089:80/tcp"
- "{{ pihole_port }}:80/tcp"
environment:
- "WEBPASSWORD={{ vault_pi_pihole_password }}"
- "ServerIP=192.168.20.11"
@@ -75,7 +69,7 @@ services:
- "DNS1=1.0.0.1"
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "{{ pihole_pihole }}:/etc/pihole/"
- "{{ pihole_config }}:/etc/pihole/"
- "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
dns:
- 127.0.0.1
@@ -84,9 +78,8 @@ services:
- NET_ADMIN
labels:
- "traefik.enable=true"
- "traefik.http.routers.pihole.rule=Host(`pipihole.{{local_domain}}`)"
# - "traefik.http.routers.pihole.entrypoints=web"
# - "traefik.http.services.pihole.loadbalancer.server.port=8089"
- "traefik.http.routers.pihole.rule=Host(`{{ pihole_host }}.{{ pi_host }}.{{ local_domain }}`)"
- "traefik.http.services.pihole.loadbalancer.server.port={{ 80 }}"
networks:
net:


@@ -1,31 +1,11 @@
{
"settings": [
{
"provider": "namecheap",
"domain": "tudattr.dev",
"host": "@",
"password": "{{ vault_ddns_tudattrdev_password }}",
"provider_ip": true
}, {
"provider": "namecheap",
"domain": "tudattr.dev",
"host": "www",
"password": "{{ vault_ddns_tudattrdev_password }}",
"provider_ip": true
},
{
"provider": "namecheap",
"domain": "tudattr.dev",
"host": "plex",
"password": "{{ vault_ddns_tudattrdev_password }}",
"provider_ip": true
},
{
"provider": "namecheap",
"domain": "borg.land",
"host": "@",
"password": "{{ vault_ddns_borgland_password }}",
"provider_ip": true
"provider": "namecheap",
"domain": "borg.land",
"host": "@",
"password": "{{ vault_ddns_borgland_password }}",
"provider_ip": true
}
]
}


@@ -0,0 +1,8 @@
---
- name: Upgrade shelly
uri:
url: "{{ ip }}/ota?url=http://archive.shelly-tools.de/version/v1.9.4/SHBDUO-1.zip"
- name: Connect shelly to wifi
uri:
url: "{{ ip }}/settings/sta?enabled=1&ssid={{ SSID }}&key={{ password }}&ipv4_method=dhcp"


@@ -0,0 +1,2 @@
---
- include_tasks: init.yaml

shelly.yml Normal file

@@ -0,0 +1 @@
{ip}/settings/sta?enabled=1&ssid={SSID}&key={password}&ipv4_method=dhcp