Added working traefik configuration/labels for containers
Added new host 'mii'
Added uptime kuma in docker

Signed-off-by: TuDatTr <tuan-dat.tran@tudattr.dev>

parent f3254d5b79
commit 7cd43fb8a8
@@ -1,5 +1,9 @@
 # TuDatTr IaC
 
+## User
+It is expected that a user with sudo privileges exists on the target; for me that user is called "tudattr".
+You can add such a user with `useradd -m -g sudo -s /bin/bash tudattr`.
+Don't forget to set a password for the new user with `passwd tudattr`.
 
 ## Backups
 Backups for aya01 and raspberry go to a Backblaze B2 bucket and are encrypted client-side by rclone.
 Before that can work, we need to create the buckets and provide Ansible with the required information.
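For reference, the bucket and credential information such a backblaze/rclone setup needs can live in group_vars plus an Ansible vault; a minimal sketch (variable names here are illustrative placeholders, not necessarily the ones this repo's role expects) could look like:

    # hypothetical variables - rename to whatever the backblaze role actually uses
    backblaze_bucket: "aya01-backup"                  # B2 bucket created beforehand in the Backblaze console
    vault_backblaze_key_id: "<B2 application key ID>"
    vault_backblaze_application_key: "<B2 application key>"
    vault_rclone_crypt_password: "<output of rclone obscure>"   # client-side encryption password for rclone
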
@@ -14,7 +14,8 @@
         - power_management
     - role: backblaze
       tags:
-        - backblaze
+        - backup
     - role: docker
      tags:
         - docker
@@ -4,6 +4,7 @@
 user: tudattr
 timezone: Europe/Berlin
 local_domain: borg.land
+remote_domain: tudattr.dev
 rclone_config: "/root/.config/rclone/"
 puid: 1000
 pgid: 1000
@@ -31,13 +32,14 @@ common_packages:
   - git
   - iperf3
   - git
-  - tmux
   - smartmontools
   - vim
   - curl
   - tree
   - rsync
   - systemd-timesyncd
+  - neofetch
+  - build-essential
 
 #
 # Docker
@@ -61,24 +63,91 @@ mysql_user: user
 aya01_host: "aya01"
 aya01_ip: "192.168.20.12"
 
-zoneminder_config: "{{ docker_dir }}/zm/"
+#
+# ZoneMinder
+#
+
+zoneminder_host: "zm"
+zoneminder_port: "8081"
+
+zoneminder_root: "{{ docker_dir }}/zm/"
+zoneminder_config: "{{ zoneminder_root }}/config/"
+zoneminder_log: "{{ zoneminder_root}}/log/"
+zoneminder_db: "{{ zoneminder_root}}/db/"
+
 zoneminder_data: "{{ docker_data_dir }}/zm/data/"
+
+#
+# Syncthing
+#
+
+syncthing_host: "syncthing"
+syncthing_port: "8384"
 syncthing_data: "{{docker_data_dir}}/syncthing/"
+
+#
+# Softserve
+#
+
 softserve_data: "{{docker_dir}}/softserve/data"
+
+#
+# cupsd
+#
+
+cupsd_host: "cupsd"
+cupsd_port: "631"
+cupsd_config: "{{ docker_dir }}/cupsd/"
+
+#
+# Uptime Kuma
+#
+
+kuma_host: "uptime"
+kuma_port: "3001"
+kuma_config: "{{ docker_dir }}/kuma/"
+
 #
 # pi
 #
 
-traefik_etc: "{{ docker_dir }}/traefik/etc-traefik/"
-traefik_var: "{{ docker_dir }}/traefik/var-log/"
-ddns_updater_data: "{{ docker_dir }}/ddns-updater/data/"
+pi_host: "pi"
+pi_ip: "192.168.20.11"
+
+#
+# Traefik
+#
+
+traefik_host: "traefik"
+traefik_user_port: "80"
+traefik_admin_port: "8080"
+
+traefik_config: "{{ docker_dir }}/traefik/etc-traefik/"
+traefik_data: "{{ docker_dir }}/traefik/var-log/"
+
+#
+# DynDns Updater
+#
+
+ddns_host: "ddns"
+ddns_port: "8000"
+ddns_data: "{{ docker_dir }}/ddns-updater/data/"
+
+#
+# Home Assistant
+#
+
+ha_host: "hass"
+ha_port: "8123"
 ha_config: "{{ docker_dir }}/home-assistant/config/"
 
-pihole_pihole: "{{ docker_dir }}/pihole/etc-pihole/"
+#
+# pihole
+#
+
+pihole_host: "pihole"
+pihole_port: "8089"
+pihole_config: "{{ docker_dir }}/pihole/etc-pihole/"
 pihole_dnsmasq: "{{ docker_dir }}/pihole/etc-dnsmasq.d/"
 
 #
@@ -94,7 +163,6 @@ backblaze_paths:
   - "{{ docker_compose_dir }}"
   - "{{ docker_dir }}"
 
-
 #
 # samba
 #
@@ -112,6 +180,7 @@ smb_user: "smbuser"
 #
 # prometheus/grafana
 #
 
 prm_user: "prometheus"
 exporter_dir: "{{ docker_dir }}/exporter/"
@@ -139,3 +208,9 @@ netdata_port: "19999"
 netdata_config: "{{ docker_dir }}/netdata/"
 netdata_lib: "{{ docker_data_dir }}/netdata/lib/"
 netdata_cache: "{{ docker_data_dir }}/netdata/cache"
+
+#
+#
+#
+swag_port: "443"
+swag_config: "{{ docker_dir }}/swag/config/"
@@ -0,0 +1,5 @@
+ansible_user: "{{ user }}"
+ansible_host: 202.61.207.139
+ansible_port: 22
+ansible_ssh_private_key_file: /mnt/veracrypt1/genesis
+ansible_become_pass: '{{ vault_mii_tudattr_password }}'
@@ -0,0 +1,11 @@
+---
+- name: Set up Servers
+  hosts: vps
+  gather_facts: yes
+  roles:
+    - role: common
+      tags:
+        - common
+    - role: docker
+      tags:
+        - docker
@@ -3,3 +3,6 @@ aya01
 
 [raspberry]
 pi
+
+[vps]
+mii
@@ -12,21 +12,33 @@
   tags:
     - syncthing
 
-- include_tasks: grafana.yml
-  tags:
-    - grafana
+#- include_tasks: grafana.yml
+# tags:
+# - grafana
 
 - include_tasks: softserve.yml
   tags:
     - softserve
 
-- include_tasks: prometheus.yml
-  tags:
-    - prometheus
-
-- include_tasks: netdata.yaml
-  tags:
-    - netdata
+#- include_tasks: prometheus.yml
+# tags:
+# - prometheus
+#
+#- include_tasks: netdata.yaml
+# tags:
+# - netdata
+#
+- include_tasks: cupsd.yml
+  tags:
+    - cupsd
+
+- include_tasks: kuma.yml
+  tags:
+    - kuma
+
+- include_tasks: traefik.yml
+  tags:
+    - traefik
 
 - name: Copy the compose file
   template:
@@ -0,0 +1,19 @@
+---
+- name: Create cupsd-config directory
+  file:
+    path: "{{ item }}"
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
+    mode: '755'
+    state: directory
+  loop:
+    - "{{ cupsd_config }}"
+  become: true
+
+- name: Copy default config
+  template:
+    owner: "{{ puid }}"
+    src: "templates/aya01/cupsd/cupsd.conf"
+    dest: "{{ cupsd_config }}/cupsd.conf"
+    mode: '660'
+  become: true
@@ -0,0 +1,30 @@
+---
+- name: Create zoneminder user
+  user:
+    name: zm
+    uid: 911
+    shell: /bin/false
+  become: true
+
+- name: Create Zoneminder config directory
+  file:
+    path: "{{ item }}"
+    owner: 911
+    group: 911
+    mode: '700'
+    state: directory
+  loop:
+    - "{{ zoneminder_config }}"
+  become: true
+
+- name: Create Zoneminder data directory
+  file:
+    path: "{{ item }}"
+    owner: 911
+    group: 911
+    mode: '755'
+    state: directory
+  loop:
+    - "{{ zoneminder_data }}"
+  become: true
+
@@ -0,0 +1,11 @@
+---
+- name: Create kuma-config directory
+  file:
+    path: "{{ item }}"
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
+    mode: '755'
+    state: directory
+  loop:
+    - "{{ kuma_config }}"
+  become: true
@@ -9,3 +9,7 @@
   when: inventory_hostname == "aya01"
   tags:
     - reload_compose
+- include_tasks: mii_compose.yml
+  when: inventory_hostname == "mii"
+  tags:
+    - reload_compose
@@ -0,0 +1,18 @@
+---
+- include_tasks: swag.yml
+  tags:
+    - swag
+
+- name: Copy the compose file
+  template:
+    src: templates/mii/compose.yaml
+    dest: "{{ docker_compose_dir }}/compose.yaml"
+  tags:
+    - reload_compose
+
+- name: Run docker compose
+  shell:
+    cmd: "docker compose up -d"
+    chdir: "{{ docker_compose_dir }}"
+  tags:
+    - reload_compose
@@ -3,21 +3,8 @@
 - name: Create traefik-config directory
   file:
     path: "{{ item }}"
-    owner: 1000
-    mode: '700'
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
     state: directory
   loop:
-    - "{{ docker_dir }}/traefik/etc-traefik/"
-    - "{{ docker_dir }}/traefik/var-log/"
-
-- name: Copy traefik-config
-  template:
-    owner: 1000
-    src: "templates/pi/{{ item }}"
-    dest: "{{ docker_dir }}/{{ item }}"
-    mode: '400'
-  loop:
-    - "traefik/etc-traefik/traefik.yml"
-    - "traefik/var-log/access.log"
-    - "traefik/var-log/traefik.log"
+    - "{{ swag_config }}"
@@ -1,5 +1,20 @@
 version: '3'
 services:
+  traefik:
+    container_name: traefik
+    image: traefik:latest
+    restart: unless-stopped
+    networks:
+      net: {}
+    volumes:
+      - "/etc/localtime:/etc/localtime:ro"
+      - "/var/run/docker.sock:/var/run/docker.sock:ro"
+      - "{{ traefik_config }}:/etc/traefik/"
+      - "{{ traefik_data }}:/var/log/"
+    ports:
+      - "{{ traefik_user_port}}:80"
+      - "{{ traefik_admin_port}}:8080"
+
   db:
     container_name: zoneminder_db
     image: mariadb
@@ -8,9 +23,9 @@ services:
       - zoneminder
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
-      - "{{ zoneminder_config}}/db:/var/lib/mysql"
+      - "{{ zoneminder_db }}:/var/lib/mysql"
     environment:
-      - "MYSQL_DATABASE=zm"
+      - "MYSQL_DATABASE={{ zoneminder_host }}"
       - "MYSQL_ROOT_PASSWORD={{ vault_mysql_root_password }}"
       - "MYSQL_USER={{ mysql_user }}"
       - "MYSQL_PASSWORD={{ vault_mysql_user_password }}"
@@ -28,24 +43,28 @@ services:
       - zoneminder
       - net
     ports:
-      - 80:80
+      - "{{ zoneminder_port }}:80"
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
       - "{{ zoneminder_data }}:/data"
-      - "{{ zoneminder_config }}/config:/config"
-      - "{{ zoneminder_config }}/log:/log"
+      - "{{ zoneminder_config }}:/config"
+      - "{{ zoneminder_log}}:/log"
       - type: tmpfs
         target: /dev/shm
         tmpfs:
          size: 1000000000
     environment:
-      - "MYSQL_DATABASE=zm"
+      - "MYSQL_DATABASE={{ zoneminder_host }}"
       - "MYSQL_ROOT_PASSWORD={{ vault_mysql_root_password }}"
       - "MYSQL_USER={{ mysql_user }}"
       - "MYSQL_PASSWORD={{ vault_mysql_user_password }}"
       - "MAX_LOG_SIZE_BYTES=1000000"
       - "MAX_LOG_NUMBER=20"
       - "TZ=Europe/Berlin"
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.zoneminder.rule=Host(`{{ zoneminder_host}}.{{ aya01_host }}.{{ local_domain }}`)"
+      - "traefik.http.services.zoneminder.loadbalancer.server.port={{ 80 }}"
 
   pihole:
     container_name: pihole
@@ -57,10 +76,10 @@ services:
       - "53:53/tcp"
       - "53:53/udp"
       - "67:67/udp"
-      - "8089:80/tcp"
+      - "{{ pihole_port }}:80/tcp"
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
-      - "{{ pihole_pihole }}:/etc/pihole/"
+      - "{{ pihole_config }}:/etc/pihole/"
       - "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
     environment:
       - "WEBPASSWORD={{ vault_aya01_pihole_password }}"
@@ -73,15 +92,19 @@ services:
       - 1.1.1.1
     cap_add:
       - NET_ADMIN
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.pihole.rule=Host(`{{ pihole_host }}.{{ aya01_host }}.{{ local_domain }}`)"
+      - "traefik.http.services.pihole.loadbalancer.server.port={{ 80 }}"
 
-  syncthing:
+  fyncthing:
     container_name: syncthing
     image: syncthing/syncthing
     restart: unless-stopped
     networks:
       - net
     ports:
-      - 8384:8384 # Web UI
+      - "{{ syncthing_port }}:8384" # Web UI
       - 22000:22000/tcp # TCP file transfers
       - 22000:22000/udp # QUIC file transfers
       - 21027:21027/udp # Receive local discovery broadcasts
@@ -91,22 +114,26 @@ services:
       - "PUID={{ puid }}"
       - "PGID={{ pgid }}"
     hostname: syncthing
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.syncthing.rule=Host(`{{ syncthing_host }}.{{ aya01_host }}.{{ local_domain }}`)"
+      - "traefik.http.services.syncthing.loadbalancer.server.port={{ syncthing_port }}"
 
-  grafana:
-    container_name: grafana
-    image: grafana/grafana-oss
-    restart: unless-stopped
-    user: "{{ puid }}:{{ pgid }}"
-    networks:
-      - net
-    ports:
-      - 3000:3000
-    volumes:
-      - "{{ grafana_data }}:/var/lib/grafana/"
-      - "{{ grafana_log }}:/var/log/grafana/"
-    environment:
-      - "GF_LOG_MODE=console file"
-    hostname: grafana
+  # grafana:
+  #   container_name: grafana
+  #   image: grafana/grafana-oss
+  #   restart: unless-stopped
+  #   user: "{{ puid }}:{{ pgid }}"
+  #   networks:
+  #     - net
+  #   ports:
+  #     - 3000:3000
+  #   volumes:
+  #     - "{{ grafana_data }}:/var/lib/grafana/"
+  #     - "{{ grafana_log }}:/var/log/grafana/"
+  #   environment:
+  #     - "GF_LOG_MODE=console file"
+  #   hostname: grafana
 
   soft-serve:
     container_name: soft-serve
@@ -115,60 +142,90 @@ services:
     networks:
       - net
     ports:
-      - 23231:23231
+      - 23231:23231 # ssh
     volumes:
       - "{{ softserve_data }}:/soft-serve"
 
-  prometheus:
-    container_name: prometheus
-    image: prom/prometheus
-    restart: unless-stopped
-    networks:
-      - net
-    ports:
-      - "{{ prm_port }}:9090"
-    volumes:
-      - "{{ prm_config }}:/etc/prometheus"
-
-  exporter_mikrotik:
-    container_name: exporter_mikrotik
-    image: "nshttpd/mikrotik-exporter:{{ e_mikrotik_version }}"
-    restart: unless-stopped
-    user: "{{ puid }}:{{ pgid }}"
-    networks:
-      - net
-    ports:
-      - "{{ e_mikrotik_port }}:9436"
-    volumes:
-      - "{{ e_mikrotik_config }}:/config"
-    environment:
-      - "CONFIG_FILE=/config/config.yml"
-
-  netdata:
-    container_name: netdata
-    image: netdata/netdata
-    restart: unless-stopped
-    networks:
-      - net
-    ports:
-      - "{{netdata_port}}:19999"
-    volumes:
-      - "{{netdata_config}}:/etc/netdata"
-      - "{{netdata_lib}}:/var/lib/netdata"
-      - "{{netdata_cache}}:/var/cache/netdata"
-      - /etc/passwd:/host/etc/passwd:ro
-      - /etc/group:/host/etc/group:ro
-      - /proc:/host/proc:ro
-      - /sys:/host/sys:ro
-      - /etc/os-release:/host/etc/os-release:ro
-    environment:
-      - "DO_NOT_TRACK=1"
-    cap_add:
-      - SYS_PTRACE
-    security_opt:
-      - apparmor:unconfined
-    hostname: "{{ aya01_host }}"
+  # prometheus:
+  #   container_name: prometheus
+  #   image: prom/prometheus
+  #   restart: unless-stopped
+  #   networks:
+  #     - net
+  #   ports:
+  #     - "{{ prm_port }}:9090"
+  #   volumes:
+  #     - "{{ prm_config }}:/etc/prometheus"
+
+  # exporter_mikrotik:
+  #   container_name: exporter_mikrotik
+  #   image: "nshttpd/mikrotik-exporter:{{ e_mikrotik_version }}"
+  #   restart: unless-stopped
+  #   user: "{{ puid }}:{{ pgid }}"
+  #   networks:
+  #     - net
+  #   ports:
+  #     - "{{ e_mikrotik_port }}:9436"
+  #   volumes:
+  #     - "{{ e_mikrotik_config }}:/config"
+  #   environment:
+  #     - "CONFIG_FILE=/config/config.yml"
+
+  # netdata:
+  #   container_name: netdata
+  #   image: netdata/netdata
+  #   restart: unless-stopped
+  #   networks:
+  #     - net
+  #   ports:
+  #     - "{{netdata_port}}:19999"
+  #   volumes:
+  #     - "{{netdata_config}}:/etc/netdata"
+  #     - "{{netdata_lib}}:/var/lib/netdata"
+  #     - "{{netdata_cache}}:/var/cache/netdata"
+  #     - /etc/passwd:/host/etc/passwd:ro
+  #     - /etc/group:/host/etc/group:ro
+  #     - /proc:/host/proc:ro
+  #     - /sys:/host/sys:ro
+  #     - /etc/os-release:/host/etc/os-release:ro
+  #   environment:
+  #     - "DO_NOT_TRACK=1"
+  #   cap_add:
+  #     - SYS_PTRACE
+  #   security_opt:
+  #     - apparmor:unconfined
+  #   hostname: "{{ aya01_host }}"
+
+  cupsd:
+    container_name: cupsd
+    image: olbat/cupsd
+    restart: unless-stopped
+    networks:
+      - net
+    ports:
+      - "{{cupsd_port}}:631"
+    volumes:
+      - /var/run/dbus:/var/run/dbus
+      - "{{cupsd_config}}:/etc/cups"
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.cupsd.rule=Host(`{{ cupsd_host }}.{{ aya01_host }}.{{local_domain}}`)"
+      - "traefik.http.services.cupsd.loadbalancer.server.port={{ cupsd_port }}"
+
+  kuma:
+    container_name: kuma
+    image: louislam/uptime-kuma:1
+    restart: always
+    networks:
+      - net
+    ports:
+      - "{{ kuma_port }}:3001"
+    volumes:
+      - "{{ kuma_config }}:/app/data"
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.kuma.rule=Host(`{{ kuma_host }}.{{ aya01_host }}.{{local_domain}}`)"
+      - "traefik.http.services.kuma.loadbalancer.server.port={{ kuma_port }}"
 
 
 networks:
   zoneminder:
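With the variables introduced earlier in this commit (kuma_host: "uptime", aya01_host: "aya01", local_domain: borg.land, kuma_port: "3001"), the uptime-kuma labels above should render roughly as follows once Ansible templates the compose file (a sketch of the expected output, not a file tracked in the repo):

    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.kuma.rule=Host(`uptime.aya01.borg.land`)"
      - "traefik.http.services.kuma.loadbalancer.server.port=3001"
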
@@ -0,0 +1,196 @@
+#
+# Configuration file for the CUPS scheduler. See "man cupsd.conf" for a
+# complete description of this file.
+#
+
+# Log general information in error_log - change "warn" to "debug"
+# for troubleshooting...
+LogLevel warn
+PageLogFormat
+ServerAlias *
+
+# Specifies the maximum size of the log files before they are rotated. The value "0" disables log rotation.
+MaxLogSize 0
+
+# Default error policy for printers
+ErrorPolicy retry-job
+
+# Allow remote access
+Listen *:631
+
+# Show shared printers on the local network.
+Browsing Yes
+BrowseLocalProtocols dnssd
+
+# Default authentication type, when authentication is required...
+DefaultAuthType Basic
+DefaultEncryption IfRequested
+
+# Web interface setting...
+WebInterface Yes
+
+# Timeout after cupsd exits if idle (applied only if cupsd runs on-demand - with -l)
+IdleExitTimeout 60
+
+# Restrict access to the server...
+<Location />
+  Order allow,deny
+  Allow all
+</Location>
+
+# Restrict access to the admin pages...
+<Location /admin>
+  Order allow,deny
+  Allow all
+</Location>
+
+# Restrict access to configuration files...
+<Location /admin/conf>
+  AuthType Default
+  Require user @SYSTEM
+  Order allow,deny
+  Allow all
+</Location>
+
+# Restrict access to log files...
+<Location /admin/log>
+  AuthType Default
+  Require user @SYSTEM
+  Order allow,deny
+  Allow all
+</Location>
+
+# Set the default printer/job policies...
+<Policy default>
+  # Job/subscription privacy...
+  JobPrivateAccess default
+  JobPrivateValues default
+  SubscriptionPrivateAccess default
+  SubscriptionPrivateValues default
+
+  # Job-related operations must be done by the owner or an administrator...
+  <Limit Create-Job Print-Job Print-URI Validate-Job>
+    Order deny,allow
+  </Limit>
+
+  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All administration operations require an administrator to authenticate...
+  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All printer operations require a printer operator to authenticate...
+  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # Only the owner or an administrator can cancel or authenticate a job...
+  <Limit Cancel-Job CUPS-Authenticate-Job>
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  <Limit All>
+    Order deny,allow
+  </Limit>
+</Policy>
+
+# Set the authenticated printer/job policies...
+<Policy authenticated>
+  # Job/subscription privacy...
+  JobPrivateAccess default
+  JobPrivateValues default
+  SubscriptionPrivateAccess default
+  SubscriptionPrivateValues default
+
+  # Job-related operations must be done by the owner or an administrator...
+  <Limit Create-Job Print-Job Print-URI Validate-Job>
+    AuthType Default
+    Order deny,allow
+  </Limit>
+
+  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
+    AuthType Default
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All administration operations require an administrator to authenticate...
+  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All printer operations require a printer operator to authenticate...
+  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # Only the owner or an administrator can cancel or authenticate a job...
+  <Limit Cancel-Job CUPS-Authenticate-Job>
+    AuthType Default
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  <Limit All>
+    Order deny,allow
+  </Limit>
+</Policy>
+
+# Set the kerberized printer/job policies...
+<Policy kerberos>
+  # Job/subscription privacy...
+  JobPrivateAccess default
+  JobPrivateValues default
+  SubscriptionPrivateAccess default
+  SubscriptionPrivateValues default
+
+  # Job-related operations must be done by the owner or an administrator...
+  <Limit Create-Job Print-Job Print-URI Validate-Job>
+    AuthType Negotiate
+    Order deny,allow
+  </Limit>
+
+  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
+    AuthType Negotiate
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All administration operations require an administrator to authenticate...
+  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # All printer operations require a printer operator to authenticate...
+  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
+    AuthType Default
+    Require user @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  # Only the owner or an administrator can cancel or authenticate a job...
+  <Limit Cancel-Job CUPS-Authenticate-Job>
+    AuthType Negotiate
+    Require user @OWNER @SYSTEM
+    Order deny,allow
+  </Limit>
+
+  <Limit All>
+    Order deny,allow
+  </Limit>
+</Policy>
@@ -9,9 +9,7 @@ entryPoints:
 # Docker configuration backend
 providers:
   docker:
-    exposedByDefault: false
-    network: compose_net
-    defaultRule: "Host(`{{ '{{' }} trimPrefix `/` .Name {{ '}}' }}.{{ local_domain }}`)"
+    endpoint: "unix:///var/run/docker.sock"
 
 # API and dashboard configuration
 api:
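Note that Traefik's Docker provider exposes every container when exposedByDefault is left at its default of true; the compose files in this commit opt services in with traefik.enable=true labels, a pattern that only has an effect when the provider is opt-in. A generic sketch of such a provider block (not necessarily this repo's final file) would combine the endpoint with exposedByDefault:

    providers:
      docker:
        endpoint: "unix:///var/run/docker.sock"
        exposedByDefault: false   # only route containers that set traefik.enable=true
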
@@ -0,0 +1,38 @@
+version: '3'
+services:
+  swag:
+    image: lscr.io/linuxserver/swag:latest
+    container_name: swag
+    networks:
+      net: {}
+    cap_add:
+      - NET_ADMIN
+    environment:
+      - PUID={{ puid }}
+      - PGID={{ pgid }}
+      - TZ={{ timezone }}
+      - URL={{ remote_domain }}
+      - VALIDATION=http
+      - SUBDOMAINS=www, #optional
+      - CERTPROVIDER= #optional
+      - DNSPLUGIN=cloudflare #optional
+      - PROPAGATION= #optional
+      - EMAIL= #optional
+      - ONLY_SUBDOMAINS=false #optional
+      - EXTRA_DOMAINS= #optional
+      - STAGING=false #optional
+    volumes:
+      - "{{ swag_config }}:/config"
+    ports:
+      - "{{ swag_port }}:443"
+      - 80:80 #optional
+    restart: unless-stopped
+
+networks:
+  net:
+    driver: bridge
+    ipam:
+      # driver: default
+      config:
+        - subnet: 172.16.69.0/24
+          gateway: 172.16.69.1
@@ -3,42 +3,37 @@ services:
   traefik:
     container_name: traefik
     image: traefik:latest
-    restart: unless-stopped
+    restart: always
     networks:
       net: {}
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
       - "/var/run/docker.sock:/var/run/docker.sock:ro"
-      - "{{ traefik_etc }}/traefik.yml:/etc/traefik/traefik.yml"
-      - "{{ traefik_var }}/traefik.log:/var/log/traefik.log"
-      - "{{ traefik_var }}/access.log:/var/log/traefik.log"
+      - "{{ traefik_config }}:/etc/traefik/"
+      - "{{ traefik_data }}:/var/log/"
     ports:
-      - 80:80
-      - 8080:8080
-    labels:
-      - "traefik.enable=true"
-      - "traefik.http.routers.traefik.rule=Host(`traefik.{{local_domain}}`)"
-      # - "traefik.http.routers.traefik.entrypoints=web"
-      # - "traefik.http.services.traefik.loadbalancer.server.port=80"
+      - "{{ traefik_user_port }}:80"
+      - "{{ traefik_admin_port }}:8080"
 
   ddns-updater:
     container_name: ddns-updater
     image: "ghcr.io/qdm12/ddns-updater"
-    restart: unless-stopped
+    restart: always
     networks:
       net: {}
     volumes:
-      - "{{ ddns_updater_data }}:/updater/data/"
+      - "{{ ddns_data }}:/updater/data/"
     ports:
-      - 8000:8000/tcp
+      - "{{ ddns_port }}:8000/tcp"
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.homeassistant.rule=Host(`ddns.{{local_domain}}`)"
+      - "traefik.http.routers.ddns-updater.rule=Host(`{{ ddns_host }}.{{ pi_host }}.{{local_domain}}`)"
+      - "traefik.http.services.ddns-updater.loadbalancer.server.port={{ ddns_port }}"
 
   homeassistant:
     container_name: homeassistant
     image: "ghcr.io/home-assistant/home-assistant:stable"
-    restart: unless-stopped
+    restart: always
     networks:
       net: {}
     volumes:
@@ -46,27 +41,26 @@ services:
       - "{{ ha_config }}:/config/"
     privileged: true
     ports:
-      - 8123:8123
+      - "{{ ha_port }}:8123"
       - 4357:4357
       - 5683:5683
       - 5683:5683/udp
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.homeassistant.rule=Host(`hass.{{local_domain}}`)"
-      # - "traefik.http.routers.homeassistant.entrypoints=web"
-      # - "traefik.http.services.homeassistant.loadbalancer.server.port=8123"
+      - "traefik.http.routers.homeassistant.rule=Host(`{{ ha_host }}.{{ pi_host }}.{{ local_domain }}`)"
+      - "traefik.http.services.homeassistant.loadbalancer.server.port={{ ha_port }}"
 
   pihole:
     container_name: pihole
     image: pihole/pihole:latest
-    restart: unless-stopped
+    restart: always
     networks:
       net: {}
     ports:
       - "53:53/tcp"
       - "53:53/udp"
       - "67:67/udp"
-      - "8089:80/tcp"
+      - "{{ pihole_port }}:80/tcp"
     environment:
       - "WEBPASSWORD={{ vault_pi_pihole_password }}"
       - "ServerIP=192.168.20.11"
@@ -75,7 +69,7 @@ services:
       - "DNS1=1.0.0.1"
     volumes:
       - "/etc/localtime:/etc/localtime:ro"
-      - "{{ pihole_pihole }}:/etc/pihole/"
+      - "{{ pihole_config }}:/etc/pihole/"
       - "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
     dns:
       - 127.0.0.1
@@ -84,9 +78,8 @@ services:
       - NET_ADMIN
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.pihole.rule=Host(`pipihole.{{local_domain}}`)"
-      # - "traefik.http.routers.pihole.entrypoints=web"
-      # - "traefik.http.services.pihole.loadbalancer.server.port=8089"
+      - "traefik.http.routers.pihole.rule=Host(`{{ pihole_host }}.{{ pi_host }}.{{ local_domain }}`)"
+      - "traefik.http.services.pihole.loadbalancer.server.port={{ 80 }}"
 
 networks:
   net:
@@ -1,31 +1,11 @@
 {
   "settings": [
     {
       "provider": "namecheap",
-      "domain": "tudattr.dev",
+      "domain": "borg.land",
       "host": "@",
-      "password": "{{ vault_ddns_tudattrdev_password }}",
+      "password": "{{ vault_ddns_borgland_password }}",
       "provider_ip": true
-    }, {
-      "provider": "namecheap",
-      "domain": "tudattr.dev",
-      "host": "www",
-      "password": "{{ vault_ddns_tudattrdev_password }}",
-      "provider_ip": true
-    },
-    {
-      "provider": "namecheap",
-      "domain": "tudattr.dev",
-      "host": "plex",
-      "password": "{{ vault_ddns_tudattrdev_password }}",
-      "provider_ip": true
-    },
-    {
-      "provider": "namecheap",
-      "domain": "borg.land",
-      "host": "@",
-      "password": "{{ vault_ddns_borgland_password }}",
-      "provider_ip": true
     }
   ]
 }
@@ -0,0 +1,8 @@
+---
+- name: Upgrade shelly
+  uri:
+    url: {{ip}}/ota?url=http://archive.shelly-tools.de/version/v1.9.4/SHBDUO-1.zip
+
+- name: Connect shelly to wifi
+  uri:
+    url: {{ip}}/settings/sta?enabled=1&ssid={{SSID}}&key={{password}}&ipv4_method=dhcp
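As committed, both url: values begin with an unquoted {{, which YAML reads as the start of a flow mapping, so Ansible will refuse to parse these tasks; the uri module also expects a full URL including the scheme. A quoted sketch of the presumed intent (same endpoints as above, http:// added as an assumption):

    - name: Upgrade shelly
      uri:
        url: "http://{{ ip }}/ota?url=http://archive.shelly-tools.de/version/v1.9.4/SHBDUO-1.zip"

    - name: Connect shelly to wifi
      uri:
        url: "http://{{ ip }}/settings/sta?enabled=1&ssid={{ SSID }}&key={{ password }}&ipv4_method=dhcp"
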
@@ -0,0 +1,2 @@
+---
+- include_tasks: init.yaml
@@ -0,0 +1 @@
+{ip}/settings/sta?enabled=1&ssid={SSID}&key={password}&ipv4_method=dhcp