First step towards rewrite

Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
Tuan-Dat Tran
2024-09-17 23:44:20 +02:00
parent 6fd77266cd
commit 50abbf933c
100 changed files with 48 additions and 61953 deletions

View File

@@ -1,24 +0,0 @@
---
- name: Shut down docker
  systemd:
    name: docker
    state: stopped
  become: true

# - name: Backing up for "{{ inventory_hostname }}"
#   shell:
#     cmd: "rclone sync {{ item }} secret:{{ item }} --transfers 16"
#   loop: "{{ host.backblaze.paths }}"
#   become: true

- name: Backing up for "{{ inventory_hostname }}"
  shell:
    cmd: "rclone sync {{ item }} secret:{{ item }} --skip-links"
  loop: "{{ host.backblaze.paths }}"
  become: true

- name: Restart docker
  systemd:
    name: docker
    state: started
  become: true

View File

@@ -1,18 +0,0 @@
---
- name: Create rclone config folder
  file:
    path: "{{ rclone_config }}"
    owner: '0'
    group: '0'
    mode: '700'
    state: directory
  become: true

- name: Copy "rclone.conf"
  template:
    src: "rclone.conf.j2"
    dest: "{{ rclone_config }}/rclone.conf"
    owner: '0'
    group: '0'
    mode: '400'
  become: true

View File

@@ -1,13 +0,0 @@
---
- name: Update and upgrade packages
  apt:
    update_cache: true
    upgrade: true
    autoremove: true
  become: true

- name: Install rclone
  apt:
    name: "rclone"
    state: present
  become: true

View File

@@ -1,5 +0,0 @@
---
- include_tasks: install.yml
- include_tasks: config.yml
- include_tasks: backup.yml

View File

@@ -1,10 +0,0 @@
[remote]
type = b2
account = {{ host.backblaze.account }}
key = {{ host.backblaze.key }}

[secret]
type = crypt
remote = {{ host.backblaze.remote }}
password = {{ host.backblaze.password }}
password2 = {{ host.backblaze.password2 }}

View File

@@ -1,42 +0,0 @@
---
- name: Install dependencies
  apt:
    name: "mergerfs"
    state: present
  become: true

- name: Create mount folders
  file:
    path: "{{ item.path }}"
    state: directory
  loop: "{{ host.fstab if host.fstab is iterable else [] }}"
  become: true

- name: Create fstab entries
  mount:
    src: "UUID={{ item.uuid }}"
    path: "{{ item.path }}"
    fstype: "{{ item.type }}"
    state: present
    backup: true
  loop: "{{ host.fstab if host.fstab is iterable else [] }}"
  become: true
  register: fstab

- name: Create/mount mergerfs
  mount:
    src: "{{ item.branches | join(':') }}"
    path: "{{ item.path }}"
    fstype: "{{ item.type }}"
    opts: "{{ item.opts | join(',') }}"
    state: present
    backup: true
  become: true
  loop: "{{ host.mergerfs if host.mergerfs is iterable else [] }}"
  register: mergerfs

- name: Mount all disks
  command: mount -a
  become: true
  when: fstab.changed or mergerfs.changed

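For reference, the mergerfs mount above collapses the branch list into a single fstab entry. With illustrative branch paths and options (not taken from this repository), the rendered line would have this shape:

/mnt/disk1:/mnt/disk2:/mnt/disk3 /media/storage fuse.mergerfs defaults,allow_other,use_ino 0 0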
View File

@@ -3,4 +3,3 @@
- include_tasks: essential.yml
- include_tasks: bash.yml
- include_tasks: sshd.yml
- include_tasks: fstab.yml

View File

@@ -1,96 +0,0 @@
---
# - include_tasks: zoneminder.yml
#   tags:
#     - zoneminder

- include_tasks: pihole.yml
  tags:
    - pihole

- include_tasks: syncthing.yml
  tags:
    - syncthing

# - include_tasks: softserve.yml
#   tags:
#     - softserve

- include_tasks: cupsd.yml
  tags:
    - cupsd

- include_tasks: kuma.yml
  tags:
    - kuma

# - include_tasks: traefik.yml
#   tags:
#     - traefik

- include_tasks: plex.yml
  tags:
    - plex

- include_tasks: ddns.yml
  tags:
    - ddns

- include_tasks: homeassistant.yml
  tags:
    - homeassistant

- include_tasks: tautulli.yml
  tags:
    - tautulli

- include_tasks: sonarr.yml
  tags:
    - sonarr

- include_tasks: radarr.yml
  tags:
    - radarr

- include_tasks: lidarr.yml
  tags:
    - lidarr

- include_tasks: prowlarr.yml
  tags:
    - prowlarr

- include_tasks: bin.yml
  tags:
    - bin

- include_tasks: gluetun.yml
  tags:
    - gluetun

- include_tasks: qbit.yml
  tags:
    - qbit

- include_tasks: qbit_private.yml
  tags:
    - qbit_priv

- include_tasks: prometheus.yml
  tags:
    - prometheus

- include_tasks: grafana.yml
  tags:
    - grafana

- include_tasks: jellyfin.yml
  tags:
    - jellyfin

- include_tasks: gitea.yml
  tags:
    - gitea

- include_tasks: gitea-runner.yml
  tags:
    - gitea-runner

View File

@@ -1,9 +0,0 @@
---
- name: Create bin-config directory
  file:
    path: "{{ bin_upload }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes

View File

@@ -1,19 +0,0 @@
---
- name: Create cupsd-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ cupsd_config }}"
  become: true

- name: Copy cupsd config
  template:
    owner: "{{ puid }}"
    src: "templates/aya01/cupsd/cupsd.conf"
    dest: "{{ cupsd_config }}/cupsd.conf"
    mode: '660'
  become: true

View File

@@ -1,16 +0,0 @@
---
- name: Create ddns-config directory
  file:
    path: "{{ docker_dir }}/ddns-updater/data/"
    owner: 1000
    group: 1000
    mode: '700'
    state: directory

- name: Copy ddns-config
  template:
    owner: 1000
    src: "templates/{{ host.hostname }}/ddns-updater/data/config.json"
    dest: "{{ docker_dir }}/ddns-updater/data/config.json"
    mode: '400'

View File

@@ -1,11 +0,0 @@
---
- name: Create gitea-runner directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ gitea.runner.volumes.data }}"

View File

@@ -1,12 +0,0 @@
---
- name: Create gitea directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ gitea.volumes.data }}"
    - "{{ gitea.volumes.config }}"

View File

@@ -1,11 +0,0 @@
---
- name: Create gitlab-runner directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ gitlab.runner.volumes.config }}"

View File

@@ -1,14 +0,0 @@
---
- name: Create gitlab-config
  file:
    path: "{{ item }}"
    owner: "{{ gitlab.puid }}"
    group: "{{ gitlab.pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ gitlab.paths.config }}"
    - "{{ gitlab.paths.logs }}"
    - "{{ gitlab.paths.data }}"

View File

@@ -1,11 +0,0 @@
---
- name: Create gluetun-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '775'
    state: directory
  loop:
    - "{{ gluetun_config }}"
  become: true

View File

@@ -1,22 +0,0 @@
---
- name: Create grafana data directory
  file:
    path: "{{ item }}"
    owner: "{{ grafana_puid }}"
    group: "{{ grafana_pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ grafana_data }}"
    - "{{ grafana_config }}"
  become: true

- name: Copy grafana config
  template:
    owner: "{{ grafana_puid }}"
    group: "{{ grafana_pgid }}"
    src: "templates/aya01/grafana/etc-grafana/grafana.ini.j2"
    dest: "{{ grafana_config }}/grafana.ini"
    mode: '644'
  become: true

View File

@@ -1,8 +0,0 @@
---
- name: Create homeassistant-config directory
  file:
    path: "{{ ha_config }}"
    mode: '755'
    state: directory
  become: true

View File

@@ -1,30 +0,0 @@
---
- name: Create zoneminder user
  user:
    name: zm
    uid: 911
    shell: /bin/false
  become: true

- name: Create Zoneminder config directory
  file:
    path: "{{ item }}"
    owner: 911
    group: 911
    mode: '700'
    state: directory
  loop:
    - "{{ zoneminder_config }}"
  become: true

- name: Create Zoneminder data directory
  file:
    path: "{{ item }}"
    owner: 911
    group: 911
    mode: '755'
    state: directory
  loop:
    - "{{ zoneminder_data }}"
  become: true

View File

@@ -1,67 +0,0 @@
---
- name: Uninstall old versions
  apt:
    name: "{{ item }}"
    state: absent
    purge: true
  loop:
    - docker
    - docker-engine
    - docker.io
    - containerd
    - runc
  become: true

- name: Update cache
  apt:
    update_cache: true
  become: true

- name: Install dependencies for apt to use repositories over HTTPS
  apt:
    name: "{{ item }}"
    state: present
  loop:
    - ca-certificates
    - curl
    - gnupg
    - lsb-release
  become: true

- name: Create keyrings directory
  ansible.builtin.file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'
  become: true

- name: Add Docker apt key.
  ansible.builtin.get_url:
    url: "{{ docker_apt_gpg_key }}"
    dest: /etc/apt/trusted.gpg.d/docker.asc
    mode: '0664'
    force: true
  become: true

- name: Add Docker repository.
  apt_repository:
    repo: "{{ docker_apt_repository }}"
    state: present
  become: true

- name: Update cache
  apt:
    update_cache: true
  become: true

- name: Install Docker Engine, containerd, and Docker Compose.
  apt:
    name: "{{ item }}"
    state: present
  loop:
    - docker-ce
    - docker-ce-cli
    - docker-compose-plugin
    - containerd.io
  become: true

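One thing to verify in the tasks above: the key is downloaded to /etc/apt/trusted.gpg.d/docker.asc even though a separate /etc/apt/keyrings directory is created. If {{ docker_apt_repository }} contains a signed-by=/etc/apt/keyrings/docker.asc option, apt will not find the key there; the get_url destination and the signed-by path have to agree.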
View File

@@ -1,31 +0,0 @@
---
- name: Create jellyfin-config directory
  file:
    path: "{{ jellyfin.config }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes

- name: Create jellyfin-cache directory
  file:
    path: "{{ jellyfin.cache }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes

- name: Create jellyfin media directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ jellyfin.media.tv }}"
    - "{{ jellyfin.media.movies }}"
    - "{{ jellyfin.media.music }}"

View File

@@ -1,11 +0,0 @@
---
- name: Create kuma-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ kuma_config }}"
  become: true

View File

@@ -1,13 +0,0 @@
---
- name: Create lidarr directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ lidarr_config }}"
    - "{{ lidarr_media }}"
    - "{{ lidarr_downloads }}"

View File

@@ -1,24 +0,0 @@
---
- include_tasks: install.yml
- include_tasks: user_group_setup.yml

- name: Copy the compose file
  template:
    src: "templates/{{ inventory_hostname }}/compose.yaml"
    dest: "{{ docker_compose_dir }}/compose.yaml"
  register: compose

- include_tasks: "{{ inventory_hostname }}_compose.yml"
  tags:
    - reload_compose

- name: Update docker Images
  shell:
    cmd: "docker compose pull"
    chdir: "{{ docker_compose_dir }}"

- name: Rebuilding docker images
  shell:
    cmd: "docker compose up -d --build"
    chdir: "{{ docker_compose_dir }}"

View File

@@ -1,5 +0,0 @@
---
- include_tasks: nginx-proxy-manager.yml
  tags:
    - nginx

View File

@@ -1,13 +0,0 @@
---
- include_tasks: nginx-proxy-manager.yml
  tags:
    - nginx

- include_tasks: pihole.yml
  tags:
    - pihole

- include_tasks: gitea-runner.yml
  tags:
    - gitea-runner

View File

@@ -1,14 +0,0 @@
---
- name: Create netdata dirs
  file:
    path: "{{ item }}"
    owner: 1000
    group: 1000
    mode: '777'
    state: directory
  loop:
    - "{{ netdata_config }}"
    - "{{ netdata_cache }}"
    - "{{ netdata_lib }}"
  become: true

View File

@@ -1,13 +0,0 @@
---
- name: Create nginx-data directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ nginx.paths.letsencrypt }}"
    - "{{ nginx.paths.data }}"
  become: yes

View File

@@ -1,14 +0,0 @@
---
- include_tasks: nginx-proxy-manager.yml
  tags:
    - nginx

- include_tasks: pihole.yml
  tags:
    - pihole

- include_tasks: gitea-runner.yml
  tags:
    - gitea-runner

View File

@@ -1,14 +0,0 @@
---
- name: Create pihole-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ docker_dir }}/pihole/etc-pihole/"
    - "{{ docker_dir }}/pihole/etc-dnsmasq.d/"
  become: true

View File

@@ -1,22 +0,0 @@
---
- name: Create plex-config directory
  file:
    path: "{{ plex_config }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes

- name: Create plex media directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ plex_tv }}"
    - "{{ plex_movies }}"
    - "{{ plex_music }}"

View File

@@ -1,21 +0,0 @@
---
- name: Create prometheus dirs
  file:
    path: "{{ item }}"
    owner: "{{ prometheus_puid }}"
    group: "{{ prometheus_pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ prometheus_config }}"
    - "{{ prometheus_data }}"
  become: true

- name: Place prometheus config
  template:
    owner: "{{ prometheus_puid }}"
    group: "{{ prometheus_pgid }}"
    src: "templates/aya01/prometheus/prometheus.yml.j2"
    dest: "{{ prometheus_config }}/prometheus.yml"
    mode: '644'
  become: true

View File

@@ -1,11 +0,0 @@
---
- name: Create prowlarr directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ prowlarr_config }}"

View File

@@ -1,12 +0,0 @@
---
- name: Create qbit-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '775'
    state: directory
  loop:
    - "{{ qbit_remote_config }}"
    - "{{ qbit_downloads }}"
  become: true

View File

@@ -1,12 +0,0 @@
---
- name: Create qbit_torrentleech-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '775'
    state: directory
  loop:
    - "{{ torrentleech_remote_config }}"
    - "{{ qbit_downloads }}"
  become: true

View File

@@ -1,13 +0,0 @@
---
- name: Create radarr directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ radarr_config }}"
    - "{{ radarr_media }}"
    - "{{ radarr_downloads }}"

View File

@@ -1,12 +0,0 @@
---
- name: Create soft-serve directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ softserve_data }}"
  become: true

View File

@@ -1,13 +0,0 @@
---
- name: Create sonarr directories
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes
  loop:
    - "{{ sonarr_config }}"
    - "{{ sonarr_media }}"
    - "{{ sonarr_downloads }}"

View File

@@ -1,20 +0,0 @@
---
- name: Create swag-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    state: directory
  loop:
    - "{{ swag_config }}"

- name: Copy site-confs
  template:
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    src: "{{ item }}"
    dest: "{{ swag_remote_site_confs }}"
    mode: '664'
  loop: "{{ swag_site_confs }}"
  become: true

View File

@@ -1,18 +0,0 @@
---
- name: Create syncthing directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  loop:
    - "{{ syncthing_data }}"
  become: true

- name: Resolve inotify error for syncthing
  template:
    src: "templates/aya01/syncthing/syncthing.conf"
    dest: "/etc/sysctl.d/syncthing.conf"
    mode: "660"
  become: true

View File

@@ -1,9 +0,0 @@
---
- name: Create tautulli-config directory
  file:
    path: "{{ tautulli_config }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    mode: '755'
    state: directory
  become: yes

View File

@@ -1,18 +0,0 @@
---
- name: Create traefik-config directory
  file:
    path: "{{ item }}"
    owner: "{{ puid }}"
    group: "{{ pgid }}"
    state: directory
  loop:
    - "{{ docker_dir }}/traefik/etc-traefik/"
    - "{{ docker_dir }}/traefik/var-log/"

- name: Copy traefik-config
  template:
    owner: 1000
    src: "templates/common/traefik/etc-traefik/traefik.yml"
    dest: "{{ traefik.config }}"
    mode: '400'

View File

@@ -1,25 +0,0 @@
---
- name: Ensure group "docker" exists
  group:
    name: docker
    state: present
  become: yes

- name: Append the group "docker" to "{{ user }}" groups
  ansible.builtin.user:
    name: "{{ user }}"
    shell: /bin/bash
    groups: docker
    append: yes
  become: yes

- name: Make sure that the docker folders exist
  ansible.builtin.file:
    path: "{{ item }}"
    owner: "{{ user }}"
    group: "{{ user }}"
    state: directory
  loop:
    - "{{ docker_compose_dir }}"
    - "{{ docker_dir }}"
  become: yes

View File

@@ -1,30 +0,0 @@
---
- name: Create zoneminder user
  user:
    name: zm
    uid: '911'
    shell: /bin/false
  become: true

- name: Create Zoneminder config directory
  file:
    path: "{{ item }}"
    owner: '911'
    group: '911'
    mode: '755'
    state: directory
  loop:
    - "{{ zoneminder_config }}"
  become: true

- name: Create Zoneminder data directory
  file:
    path: "{{ item }}"
    owner: '911'
    group: '911'
    mode: '755'
    state: directory
  loop:
    - "{{ zoneminder_data }}"
  become: true

View File

@@ -1,518 +0,0 @@
version: '3'

services:
  nginx:
    container_name: "{{nginx.host}}"
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    networks:
      net: {}
    ports:
      - '{{nginx.endpoints.http}}:80'
      - '{{nginx.endpoints.https}}:443'
      - '{{nginx.endpoints.admin}}:81'
    volumes:
      - "{{nginx.paths.data}}:/data"
      - "{{nginx.paths.letsencrypt}}:/etc/letsencrypt"
      - '/var/run/docker.sock:/var/run/docker.sock'

  pihole:
    container_name: pihole
    image: pihole/pihole:latest
    restart: unless-stopped
    depends_on:
      - nginx
    networks:
      - net
    ports:
      - "53:53/tcp"
      - "53:53/udp"
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - "{{ pihole_config }}:/etc/pihole/"
      - "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{timezone}}
      - "WEBPASSWORD={{ vault_aya01_pihole_password }}"
      - "ServerIP={{ host.ip }}"
      - "INTERFACE=eth0"
      - "DNS1=1.1.1.1"
      - "DNS2=1.0.0.1"
    dns:
      - 127.0.0.1
      - 1.1.1.1
    cap_add:
      - NET_ADMIN

  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    ports:
      - 22000:22000/tcp # TCP file transfers
      - 22000:22000/udp # QUIC file transfers
      - 21027:21027/udp # Receive local discovery broadcasts
    volumes:
      - "{{syncthing_data}}:/var/syncthing"
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{timezone}}
    hostname: syncthing

  cupsd:
    container_name: cupsd
    image: olbat/cupsd
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{timezone}}
    volumes:
      - /var/run/dbus:/var/run/dbus
      - "{{cupsd_config}}:/etc/cups"

  kuma:
    container_name: kuma
    image: louislam/uptime-kuma:1
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{timezone}}
    ports:
      - "{{kuma_port}}:3001"
    volumes:
      - "{{ kuma_config }}:/app/data"

  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    devices:
      - /dev/dri:/dev/dri
    ports:
      - "{{ plex_port }}:32400"
      - "1900:1900"
      - "3005:3005"
      - "5353:5353"
      - "32410:32410"
      - "8324:8324"
      - "32412:32412"
      - "32469:32469"
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{timezone}}
      - VERSION=docker
    volumes:
      - "{{ plex_config }}:/config"
      - "{{ plex_tv }}:/tv:ro"
      - "{{ plex_movies }}:/movies:ro"
      - "{{ plex_music }}:/music:ro"

  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    restart: unless-stopped
    depends_on:
      - prowlarr
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ sonarr_config }}:/config
      - {{ sonarr_media }}:/tv #optional
      - {{ sonarr_downloads }}:/downloads #optional

  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    restart: unless-stopped
    depends_on:
      - prowlarr
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ radarr_config }}:/config
      - {{ radarr_media }}:/movies #optional
      - {{ radarr_downloads }}:/downloads #optional

  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    restart: unless-stopped
    depends_on:
      - prowlarr
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ lidarr_config }}:/config
      - {{ lidarr_media }}:/music #optional
      - {{ lidarr_downloads }}:/downloads #optional

  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ prowlarr_config }}:/config

  pastebin:
    image: wantguns/bin
    container_name: pastebin
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
      - ROCKET_PORT={{ bin_port }}
      - HOST_URL={{ bin_host }}.{{ aya01_host }}.{{ local_domain }}
    volumes:
      - {{ bin_upload }}:/app/upload

  tautulli:
    image: lscr.io/linuxserver/tautulli:latest
    container_name: tautulli
    restart: unless-stopped
    depends_on:
      - plex
    networks:
      - net
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
    ports:
      - "{{ tautulli_port }}:8181"
    volumes:
      - {{ tautulli_config }}:/config

  {{ gluetun_host }}:
    image: qmcgaw/gluetun
    container_name: {{ gluetun_host }}
    restart: unless-stopped
    networks:
      - net
    cap_add:
      - NET_ADMIN
    devices:
      - /dev/net/tun:/dev/net/tun
    volumes:
      - {{ gluetun_config }}:/gluetun
    environment:
      - PUID={{puid}}
      - PGID={{pgid}}
      - TZ={{ timezone }}
      - VPN_SERVICE_PROVIDER=protonvpn
      - UPDATER_VPN_SERVICE_PROVIDERS=protonvpn
      - UPDATER_PERIOD=24h
      - SERVER_COUNTRIES={{ gluetun_country }}
      - OPENVPN_USER={{ vault_qbit_vpn_user }}+pmp
      - OPENVPN_PASSWORD={{ vault_qbit_vpn_password }}

  {{ torrentleech_host }}:
    image: qbittorrentofficial/qbittorrent-nox
    container_name: {{ torrentleech_host }}
    restart: unless-stopped
    depends_on:
      - gluetun
      - sonarr
      - radarr
      - lidarr
    network_mode: "container:{{ gluetun_host }}"
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
      - QBT_EULA="accept"
      - QBT_WEBUI_PORT="{{ torrentleech_port }}"
    volumes:
      - {{ torrentleech_remote_config }}:/config
      - {{ qbit_downloads }}:/downloads

  {{qbit_host}}:
    image: qbittorrentofficial/qbittorrent-nox
    container_name: {{ qbit_host }}
    restart: unless-stopped
    depends_on:
      - gluetun
      - sonarr
      - radarr
      - lidarr
    network_mode: "container:{{ gluetun_host }}"
    environment:
      - PUID={{ puid }}
      - PGID={{ pgid }}
      - TZ={{ timezone }}
      - QBT_EULA="accept"
      - QBT_WEBUI_PORT="{{ qbit_port }}"
    volumes:
      - {{ qbit_remote_config }}:/config
      - {{ qbit_downloads }}:/downloads

  {{ prometheus_host }}:
    image: prom/prometheus
    container_name: {{ prometheus_host }}
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    environment:
      - PUID={{ prometheus_puid }}
      - PGID={{ prometheus_pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ prometheus_config }}:/etc/prometheus/
      - prometheus_data:/prometheus/

  {{ grafana_host }}:
    image: grafana/grafana-oss
    container_name: {{ grafana_host }}
    restart: unless-stopped
    user: "0:0"
    depends_on:
      - {{ prometheus_host }}
    networks:
      - net
    environment:
      - PUID={{ grafana_puid }}
      - PGID={{ grafana_pgid }}
      - TZ={{ timezone }}
    volumes:
      - {{ grafana_data }}:/var/lib/grafana/
      - {{ grafana_config }}:/etc/grafana/

  ddns-updater:
    container_name: ddns-updater
    image: "ghcr.io/qdm12/ddns-updater"
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      net: {}
    volumes:
      - "{{ ddns_data }}:/updater/data/"

  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      net: {}
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - "{{ ha_config }}:/config/"
    privileged: true
    ports:
      - "{{ ha_port }}:8123"
      - 4357:4357
      - 5683:5683
      - 5683:5683/udp

  {{stirling.host}}:
    container_name: {{stirling.host}}
    image: frooodle/s-pdf:latest
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      net: {}

  {{ jellyfin.host }}:
    container_name: {{ jellyfin.host }}
    image: jellyfin/jellyfin
    restart: 'unless-stopped'
    depends_on:
      - pihole
    networks:
      net: {}
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - {{ jellyfin.config }}:/config
      - {{ jellyfin.cache }}:/cache
      - {{ jellyfin.media.tv }}:/tv:ro
      - {{ jellyfin.media.movies }}:/movies:ro
      - {{ jellyfin.media.music }}:/music:ro
    ports:
      - "{{ jellyfin.port }}:{{ jellyfin.port }}"

  broker:
    container_name: {{ paperless.redis.host }}
    image: docker.io/library/redis:7
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    volumes:
      - {{paperless.redis.data}}:/data

  db:
    container_name: {{ paperless.db.host }}
    image: docker.io/library/postgres:15
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    volumes:
      - {{paperless.db.data}}:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: {{ paperless.db.db }}
      POSTGRES_USER: {{ paperless.db.user }}
      POSTGRES_PASSWORD: {{ paperless.db.password }}

  paperless:
    container_name: {{ paperless.host }}
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    restart: unless-stopped
    depends_on:
      - db
      - broker
    networks:
      - net
    healthcheck:
      test: ["CMD", "curl", "-fs", "-S", "--max-time", "2", "http://localhost:{{ paperless.port }}"]
      interval: 30s
      timeout: 10s
      retries: 5
    volumes:
      - {{ paperless.data.data }}:/usr/src/paperless/data
      - {{ paperless.data.media }}:/usr/src/paperless/media
      - {{ paperless.data.export }}:/usr/src/paperless/export
      - {{ paperless.data.consume }}:/usr/src/paperless/consume
    environment:
      - "PAPERLESS_REDIS=redis://broker:6379"
      - "PAPERLESS_DBHOST=db"
      - "PAPERLESS_DBUSER={{paperless.db.user}}"
      - "PAPERLESS_DBPASS={{paperless.db.password}}"
      - "USERMAP_UID={{ puid }}"
      - "USERMAP_GID={{ pgid }}"
      - "PAPERLESS_URL=https://{{paperless.host}}.{{ host.hostname }}.{{ backup_domain }}"
      - "PAPERLESS_TIME_ZONE={{ timezone }}"
      - "PAPERLESS_OCR_LANGUAGE=deu"

  {{ homarr.host }}:
    container_name: {{ homarr.host }}
    image: ghcr.io/ajnart/homarr:latest
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    volumes:
      - {{ homarr.volumes.configs }}:/app/data/configs
      - {{ homarr.volumes.data }}:/data
      - {{ homarr.volumes.icons }}:/app/public/icons

  {{ gitea.host }}:
    container_name: {{ gitea.host }}
    image: gitea/gitea:1.20.5-rootless
    restart: unless-stopped
    depends_on:
      - pihole
    networks:
      - net
    volumes:
      - {{ gitea.volumes.data }}:/var/lib/gitea
      - {{ gitea.volumes.config }}:/etc/gitea
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "{{ gitea.ports.http }}:3000"
      - "{{ gitea.ports.ssh }}:2222"

  {{ gitea.runner.host }}:
    container_name: {{ gitea.runner.host }}
    image: gitea/act_runner:nightly
    restart: unless-stopped
    depends_on:
      - {{ gitea.host }}
    networks:
      - net
    volumes:
      - "{{ gitea.runner.config_file }}:/config.yaml"
      - "{{ gitea.runner.volumes.data }}:/data"
      - "/var/run/docker.sock:/var/run/docker.sock"
    environment:
      - "GITEA_INSTANCE_URL={{ gitea.url }}"
      - "GITEA_RUNNER_REGISTRATION_TOKEN={{ gitea.runner.token }}"
      - "GITEA_RUNNER_NAME={{ gitea.runner.name }}"
      - "CONFIG_FILE=/config.yaml"

  {{ jellyseer.host }}:
    container_name: {{ jellyseer.host }}
    image: fallenbagel/jellyseerr:latest
    restart: unless-stopped
    environment:
      - LOG_LEVEL=info
      - TZ={{ timezone }}
    depends_on:
      - {{ jellyfin.host }}
    networks:
      - net
    volumes:
      - {{ jellyseer.volumes.config }}:/app/config

networks:
  zoneminder:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: {{ zoneminder_network }}
  net:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: {{ docker_network }}

volumes:
  prometheus_data: {}

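A note on the two qBittorrent services above: network_mode: "container:{{ gluetun_host }}" places them inside the gluetun container's network namespace, so all torrent traffic enters and leaves through the VPN tunnel. The equivalent docker CLI form (illustrative, assuming the VPN container is literally named gluetun) is:

docker run --network container:gluetun qbittorrentofficial/qbittorrent-nox

The +pmp suffix on OPENVPN_USER is ProtonVPN's convention for requesting NAT-PMP port forwarding through the tunnel.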
View File

@@ -1,196 +0,0 @@
#
# Configuration file for the CUPS scheduler. See "man cupsd.conf" for a
# complete description of this file.
#

# Log general information in error_log - change "warn" to "debug"
# for troubleshooting...
LogLevel warn
PageLogFormat
ServerAlias *

# Specifies the maximum size of the log files before they are rotated. The value "0" disables log rotation.
MaxLogSize 0

# Default error policy for printers
ErrorPolicy retry-job

# Allow remote access
Listen *:631

# Show shared printers on the local network.
Browsing Yes
BrowseLocalProtocols dnssd

# Default authentication type, when authentication is required...
DefaultAuthType Basic
DefaultEncryption IfRequested

# Web interface setting...
WebInterface Yes

# Timeout after cupsd exits if idle (applied only if cupsd runs on-demand - with -l)
IdleExitTimeout 60

# Restrict access to the server...
<Location />
  Order allow,deny
  Allow all
</Location>

# Restrict access to the admin pages...
<Location /admin>
  Order allow,deny
  Allow all
</Location>

# Restrict access to configuration files...
<Location /admin/conf>
  AuthType Default
  Require user @SYSTEM
  Order allow,deny
  Allow all
</Location>

# Restrict access to log files...
<Location /admin/log>
  AuthType Default
  Require user @SYSTEM
  Order allow,deny
  Allow all
</Location>

# Set the default printer/job policies...
<Policy default>
  # Job/subscription privacy...
  JobPrivateAccess default
  JobPrivateValues default
  SubscriptionPrivateAccess default
  SubscriptionPrivateValues default

  # Job-related operations must be done by the owner or an administrator...
  <Limit Create-Job Print-Job Print-URI Validate-Job>
    Order deny,allow
  </Limit>

  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  # All administration operations require an administrator to authenticate...
  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # All printer operations require a printer operator to authenticate...
  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # Only the owner or an administrator can cancel or authenticate a job...
  <Limit Cancel-Job CUPS-Authenticate-Job>
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  <Limit All>
    Order deny,allow
  </Limit>
</Policy>

# Set the authenticated printer/job policies...
<Policy authenticated>
  # Job/subscription privacy...
  JobPrivateAccess default
  JobPrivateValues default
  SubscriptionPrivateAccess default
  SubscriptionPrivateValues default

  # Job-related operations must be done by the owner or an administrator...
  <Limit Create-Job Print-Job Print-URI Validate-Job>
    AuthType Default
    Order deny,allow
  </Limit>

  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
    AuthType Default
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  # All administration operations require an administrator to authenticate...
  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # All printer operations require a printer operator to authenticate...
  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # Only the owner or an administrator can cancel or authenticate a job...
  <Limit Cancel-Job CUPS-Authenticate-Job>
    AuthType Default
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  <Limit All>
    Order deny,allow
  </Limit>
</Policy>

# Set the kerberized printer/job policies...
<Policy kerberos>
  # Job/subscription privacy...
  JobPrivateAccess default
  JobPrivateValues default
  SubscriptionPrivateAccess default
  SubscriptionPrivateValues default

  # Job-related operations must be done by the owner or an administrator...
  <Limit Create-Job Print-Job Print-URI Validate-Job>
    AuthType Negotiate
    Order deny,allow
  </Limit>

  <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
    AuthType Negotiate
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  # All administration operations require an administrator to authenticate...
  <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # All printer operations require a printer operator to authenticate...
  <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
    AuthType Default
    Require user @SYSTEM
    Order deny,allow
  </Limit>

  # Only the owner or an administrator can cancel or authenticate a job...
  <Limit Cancel-Job CUPS-Authenticate-Job>
    AuthType Negotiate
    Require user @OWNER @SYSTEM
    Order deny,allow
  </Limit>

  <Limit All>
    Order deny,allow
  </Limit>
</Policy>

View File

@@ -1,11 +0,0 @@
{
  "settings": [
    {
      "provider": "namecheap",
      "domain": "{{ local_domain }}",
      "host": "{{ local_subdomains }}",
      "password": "{{ vault_ddns_local_password }}",
      "provider_ip": true
    }
  ]
}

File diff suppressed because it is too large

View File

@@ -1,18 +0,0 @@
devices:
  - name: mikrotik
    address: "{{ e_mikrotik_ip }}"
    user: "{{ prm_user }}"
    password: "{{ vault_prm_user_password }}"
    features:
      bgp: false
      dhcp: true
      dhcpv6: true
      dhcpl: true
      routes: true
      pools: true
      optics: true

View File

@@ -1,46 +0,0 @@
# Sample config for Prometheus.

global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: '{{ user }}'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  - job_name: 'node'
    scrape_interval: 10s
    scrape_timeout: 10s
    tls_config:
      insecure_skip_verify: true
    static_configs:
      - targets: ['{{ aya01_ip }}:{{node_exporter.port}}']
      - targets: ['{{ mii_ip }}:{{node_exporter.port}}']
      - targets: ['{{ pi_ip }}:{{node_exporter.port}}']
      - targets: ['{{ naruto_ip }}:{{node_exporter.port}}']
      - targets: ['{{ inko_ip }}:{{node_exporter.port}}']

  - job_name: 'mikrotik'
    static_configs:
      - targets:
          - {{ snmp_exporter_target }}
    metrics_path: /snmp
    params:
      module: [mikrotik]
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: {{ aya01_ip }}:{{ snmp_exporter_port }} # The SNMP exporter's real hostname:port.

  - job_name: 'SMART'
    static_configs:
      - targets: ['{{ aya01_ip }}:{{smart_exporter.port}}']

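With the relabeling above, each address listed under the mikrotik job becomes the target query parameter and the scrape is redirected to the SNMP exporter itself, i.e. an effective URL of the form http://{{ aya01_ip }}:{{ snmp_exporter_port }}/snmp?module=mikrotik&target=<device> (illustrative).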
View File

@@ -1 +0,0 @@
fs.inotify.max_user_watches=204800

View File

@@ -1,36 +0,0 @@
## traefik.yml

# Entry Points
entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"

# Docker configuration backend
providers:
  docker:
    endpoint: "unix:///var/run/docker.sock"
    exposedbydefault: "false"

# API and dashboard configuration
api:
  insecure: true
  dashboard: true

log:
  filePath: "/var/log/traefik.log"

accessLog:
  filePath: "/var/log/access.log"

certificatesResolvers:
  myresolver:
    acme:
      email: "me+cert@tudattr.dev"
      storage: "/letsencrypt/acme.json"
      dnsChallenge:
        provider: "namecheap"

metrics:
  prometheus:
    entrypoint: "traefik"

View File

@@ -1,25 +0,0 @@
version: '3'

services:
  nginx:
    container_name: "{{nginx.host}}"
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    networks:
      net: {}
    ports:
      - '{{nginx.endpoints.http}}:80'
      - '{{nginx.endpoints.https}}:443'
      - '{{nginx.endpoints.admin}}:81'
    volumes:
      - "{{nginx.paths.data}}:/data"
      - "{{nginx.paths.letsencrypt}}:/etc/letsencrypt"
      - '/var/run/docker.sock:/var/run/docker.sock'

networks:
  net:
    driver: bridge
    ipam:
      # driver: default
      config:
        - subnet: 172.16.69.0/24
          gateway: 172.16.69.1

View File

@@ -1,73 +0,0 @@
## Version 2023/02/09 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/site-confs/default.conf.sample

# redirect all traffic to https
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    location / {
        return 301 https://$host$request_uri;
    }
}

# main server block
server {
    listen 443 ssl http2 default_server;
    listen [::]:443 ssl http2 default_server;

    server_name _;

    root /config/www;
    index index.html index.htm index.php;

    # enable subfolder method reverse proxy confs
    include /config/nginx/proxy-confs/*.subfolder.conf;

    # enable for ldap auth (requires ldap-location.conf in the location block)
    #include /config/nginx/ldap-server.conf;

    # enable for Authelia (requires authelia-location.conf in the location block)
    #include /config/nginx/authelia-server.conf;

    # enable for Authentik (requires authentik-location.conf in the location block)
    #include /config/nginx/authentik-server.conf;

    location / {
        # enable for basic auth
        #auth_basic "Restricted";
        #auth_basic_user_file /config/nginx/.htpasswd;

        # enable for ldap auth (requires ldap-server.conf in the server block)
        #include /config/nginx/ldap-location.conf;

        # enable for Authelia (requires authelia-server.conf in the server block)
        #include /config/nginx/authelia-location.conf;

        # enable for Authentik (requires authentik-server.conf in the server block)
        #include /config/nginx/authentik-location.conf;

        try_files $uri $uri/ /index.html /index.php$is_args$args =404;
    }

    location /ip {
        add_header Content-Type "text/plain";
        return 200 '$remote_addr\n';
    }

    location ~ ^(.+\.php)(.*)$ {
        fastcgi_split_path_info ^(.+\.php)(.*)$;
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_index index.php;
        include /etc/nginx/fastcgi_params;
    }

    # deny access to .htaccess/.htpasswd files
    location ~ /\.ht {
        deny all;
    }
}

# enable subdomain method reverse proxy confs
include /config/nginx/proxy-confs/*.subdomain.conf;

# enable proxy cache for auth
proxy_cache_path cache/ keys_zone=auth_cache:10m;

View File

@@ -1,31 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name {{ plex_host }}.{{ remote_domain }};

    include /config/nginx/ssl.conf;

    client_max_body_size 0;
    proxy_redirect off;
    proxy_buffering off;

    location / {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ plex_host }}.{{ aya01_host }}.{{ local_domain }};

        proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier;
        proxy_set_header X-Plex-Device $http_x_plex_device;
        proxy_set_header X-Plex-Device-Name $http_x_plex_device_name;
        proxy_set_header X-Plex-Platform $http_x_plex_platform;
        proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version;
        proxy_set_header X-Plex-Product $http_x_plex_product;
        proxy_set_header X-Plex-Token $http_x_plex_token;
        proxy_set_header X-Plex-Version $http_x_plex_version;
        proxy_set_header X-Plex-Nocache $http_x_plex_nocache;
        proxy_set_header X-Plex-Provides $http_x_plex_provides;
        proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor;
        proxy_set_header X-Plex-Model $http_x_plex_model;
    }
}

View File

@@ -1,20 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name {{ stub_host }}.{{ local_domain }};

    access_log off;
    allow 192.168.20.12;
    deny all;

    include /config/nginx/ssl.conf;

    client_max_body_size 0;

    location /stub_status {
        include /config/nginx/resolver.conf;
        stub_status on;
        proxy_pass http://{{ stub_host }}.{{ aya01_host }}.{{ local_domain }};
    }
}

View File

@@ -1,34 +0,0 @@
## Version 2023/02/05
# make sure that your tautulli container is named tautulli
# make sure that your dns has a cname set for tautulli

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name {{ tautulli_host }}.{{ remote_domain }};

    include /config/nginx/ssl.conf;

    client_max_body_size 0;

    location / {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ tautulli_host }}.{{ aya01_host }}.{{ local_domain }};
    }

    location ~ (/tautulli)?/api {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ tautulli_host }}.{{ aya01_host }}.{{ local_domain }};
    }

    location ~ (/tautulli)?/newsletter {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ tautulli_host }}.{{ aya01_host }}.{{ local_domain }};
    }

    location ~ (/tautulli)?/image {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ tautulli_host }}.{{ aya01_host }}.{{ local_domain }};
    }
}

View File

@@ -1,17 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name {{ kuma_host }}.{{ remote_domain }};

    include /config/nginx/ssl.conf;

    client_max_body_size 0;

    location / {
        include /config/nginx/resolver.conf;
        proxy_pass http://{{ kuma_host }}.{{ aya01_host }}.{{ local_domain }};
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}

View File

@@ -1,42 +0,0 @@
version: '3'

services:
  nginx:
    container_name: "{{nginx.host}}"
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    networks:
      net: {}
    ports:
      - '{{nginx.endpoints.http}}:80'
      - '{{nginx.endpoints.https}}:443'
      - '{{nginx.endpoints.admin}}:81'
    volumes:
      - "{{nginx.paths.data}}:/data"
      - "{{nginx.paths.letsencrypt}}:/etc/letsencrypt"
      - '/var/run/docker.sock:/var/run/docker.sock'

  {{ gitea.runner.host }}:
    container_name: {{ gitea.runner.host }}
    image: gitea/act_runner:nightly
    restart: unless-stopped
    depends_on:
      - nginx
    networks:
      - net
    volumes:
      - "{{ gitea.runner.config_file }}:/config.yaml"
      - "/var/run/docker.sock:/var/run/docker.sock"
    environment:
      - "GITEA_INSTANCE_URL={{ gitea.url }}"
      - "GITEA_RUNNER_REGISTRATION_TOKEN={{ gitea.runner.token }}"
      - "GITEA_RUNNER_NAME={{ gitea.runner.name }}"
      - "CONFIG_FILE=/config.yaml"

networks:
  net:
    driver: bridge
    ipam:
      # driver: default
      config:
        - subnet: 172.16.69.0/24
          gateway: 172.16.69.1

View File

@@ -1,68 +0,0 @@
version: '3'

services:
  nginx:
    container_name: "{{nginx.host}}"
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    networks:
      net: {}
    ports:
      - '{{nginx.endpoints.http}}:80'
      - '{{nginx.endpoints.https}}:443'
      - '{{nginx.endpoints.admin}}:81'
    volumes:
      - "{{nginx.paths.data}}:/data"
      - "{{nginx.paths.letsencrypt}}:/etc/letsencrypt"
      - '/var/run/docker.sock:/var/run/docker.sock'

  pihole:
    container_name: pihole
    image: pihole/pihole:latest
    restart: unless-stopped
    depends_on:
      - nginx
    networks:
      net: {}
    ports:
      - "53:53/tcp"
      - "53:53/udp"
    environment:
      - "WEBPASSWORD={{ vault.pi.pihole.password }}"
      - "ServerIP=192.168.20.11"
      - "INTERFACE=eth0"
      - "DNS1=1.1.1.1"
      - "DNS2=1.0.0.1"
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - "{{ pihole_config }}:/etc/pihole/"
      - "{{ pihole_dnsmasq }}:/etc/dnsmasq.d/"
    dns:
      - 127.0.0.1
      - 1.1.1.1
    cap_add:
      - NET_ADMIN

  {{ gitea.runner.host }}:
    container_name: {{ gitea.runner.host }}
    image: gitea/act_runner:nightly
    restart: unless-stopped
    depends_on:
      - nginx
    networks:
      - net
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - {{ gitea.runner.volumes.data }}:/data
    environment:
      - "GITEA_INSTANCE_URL={{ gitea.url }}"
      - "GITEA_RUNNER_REGISTRATION_TOKEN={{ gitea.runner.token }}"

networks:
  net:
    driver: bridge
    ipam:
      # driver: default
      config:
        - subnet: 172.16.69.0/24
          gateway: 172.16.69.1

View File

@@ -0,0 +1,8 @@
go_arch_map:
  i386: "386"
  x86_64: "amd64"
  aarch64: "arm64"
  armv7l: "armv7"
  armv6l: "armv6"

go_arch: "{{ go_arch_map[ansible_architecture] | default(ansible_architecture) }}"

View File

@@ -1,24 +0,0 @@
---
- name: Copy powertop service
  template:
    src: templates/powertop.service
    dest: /etc/systemd/system/powertop.service
  become: true

- name: Reload all services
  ansible.builtin.systemd:
    daemon_reload: yes
  become: true

- name: Start and enable the new powertop service
  ansible.builtin.systemd:
    state: started
    enabled: true
    name: powertop
  become: true

- name: Copy hdparm.conf
  template:
    src: templates/hdparm.conf
    dest: /etc/hdparm.conf
  become: true

View File

@@ -1,15 +0,0 @@
---
- name: Update cache
  apt:
    update_cache: true
  become: true

- name: Install packages
  apt:
    name: "{{ item }}"
    state: present
  loop:
    - powertop
    - hdparm
  become: true

View File

@@ -1,3 +0,0 @@
---
- include_tasks: install.yml
- include_tasks: configure.yml

View File

@@ -1,18 +0,0 @@
quiet

/dev/sda {
    apm = 128
    spindown_time = 240
}

/dev/sdb {
    apm = 128
    spindown_time = 240
}

/dev/sdc {
    apm = 128
    spindown_time = 240
}

/dev/sdd {
    apm = 128
    spindown_time = 240
}

View File

@@ -1,11 +0,0 @@
[Unit]
Description=PowerTOP auto tune

[Service]
Type=oneshot
Environment="TERM=dumb"
RemainAfterExit=true
ExecStart=/usr/sbin/powertop --auto-tune

[Install]
WantedBy=multi-user.target

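The oneshot unit above runs powertop --auto-tune once at boot to apply all of PowerTOP's suggested power-saving settings; RemainAfterExit=true keeps the unit shown as active afterwards, and TERM=dumb avoids powertop complaining about a missing terminal when run as a service.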
View File

@@ -1,15 +0,0 @@
---
- name: Copy "{{ samba.config }}"
  template:
    src: "{{ samba.config }}"
    dest: /etc/samba/smb.conf
  become: true
  register: smbconf

- name: Restart nmbd.service
  systemd:
    name: nmbd
    state: restarted
    enabled: yes
  become: true
  when: smbconf.changed

View File

@@ -1,46 +0,0 @@
---
- name: Update and upgrade packages
  apt:
    update_cache: true
    upgrade: true
    autoremove: true
  become: true

- name: Install Samba dependencies
  apt:
    name: "{{ samba.dependencies }}"
    state: present
  become: true

- name: Add group "{{ samba.group }}"
  group:
    name: "{{ samba.group }}"
    state: present
  become: true

- name: Change permission on share
  file:
    path: "{{ item }}"
    group: "{{ samba.group }}"
    mode: "2770"
  become: true
  loop:
    - "{{ samba.shares.media.path }}"
    - "{{ samba.shares.paperless.path }}"

- name: Add user "{{ samba.user }}"
  user:
    name: "{{ samba.user }}"
    shell: "/sbin/nologin"
    groups: "{{ samba.group }}"
    append: true
  become: true
  register: new_user

- name: Add password to "{{ samba.user }}"
  shell:
    cmd: smbpasswd -a "{{ samba.user }}"
    stdin: "{{ host.samba.password }}\n{{ host.samba.password }}"
  become: true
  when: new_user.changed

View File

@@ -1,3 +0,0 @@
---
- include_tasks: install.yaml
- include_tasks: config.yaml

View File

@@ -1,14 +0,0 @@
[{{ samba.shares.media.name }}]
  comment = {{ samba.shares.media.name }}
  path = "{{ samba.shares.media.path }}"
  writable = no
  guest ok = no
  valid users = "@{{ samba.group }}"

[{{ samba.shares.paperless.name }}]
  comment = {{ samba.shares.paperless.name }}
  path = "{{ samba.shares.paperless.path }}"
  writable = yes
  guest ok = no
  valid users = "@{{ samba.group }}"
  create mask = 755

View File

@@ -1,8 +0,0 @@
---
- name: Upgrade shelly
uri:
url: {{ip}}/ota?url=http://archive.shelly-tools.de/version/v1.9.4/SHBDUO-1.zip
- name: Connect shelly to wifi
uri:
url: {{ip}}/settings/sta?enabled=1&ssid={{SSID}}&key={{password}}&ipv4_method=dhcp

View File

@@ -1,2 +0,0 @@
---
- include_tasks: init.yaml

View File

@@ -1,18 +0,0 @@
---
- name: Determine latest GitHub release (local)
  delegate_to: localhost
  uri:
    url: "https://api.github.com/repos/prometheus-community/smartctl_exporter/releases/{{ smart_exporter.version }}"
    body_format: json
  register: _github_release
  until: _github_release.status == 200
  retries: 3

- name: Set smart_exporter_version
  set_fact:
    smart_exporter_version: "{{ _github_release.json.tag_name | regex_replace('^v?([0-9\\.]+)$', '\\1') }}"

- name: Set smart_exporter_download_url
  set_fact:
    smart_exporter_download_url: "https://github.com/prometheus-community/smartctl_exporter/releases/download/v{{ smart_exporter_version }}/smartctl_exporter-{{ smart_exporter_version }}.linux-{{ go_arch }}.tar.gz"

View File

@@ -1,30 +0,0 @@
---
- name: Download/Extract "{{ smart_exporter_download_url }}"
  unarchive:
    src: "{{ smart_exporter_download_url }}"
    dest: /tmp/
    remote_src: true
    mode: '0755'

- name: Move smart_exporter into path
  copy:
    src: "/tmp/smartctl_exporter-{{ smart_exporter_version }}.linux-{{ go_arch }}/smartctl_exporter"
    dest: "{{ smart_exporter.bin_path }}"
    mode: '0755'
    remote_src: true
  become: true

- name: Create smart_exporter user.
  user:
    name: smart_exporter
    shell: /sbin/nologin
    state: present
  become: true

- name: Copy the smart_exporter systemd unit file.
  template:
    src: smart_exporter.service.j2
    dest: /etc/systemd/system/smart_exporter.service
    mode: '0644'
  register: smart_exporter_service
  become: true

View File

@@ -1,4 +0,0 @@
---
- include_tasks: get_version.yml
- include_tasks: install.yml
- include_tasks: systemd.yml

View File

@@ -1,9 +0,0 @@
---
- name: Ensure smart_exporter is running and enabled at boot.
  service:
    daemon_reload: true
    name: smart_exporter
    state: restarted
    enabled: true
  when: smart_exporter_service is changed
  become: true

View File

@@ -1,19 +0,0 @@
[Unit]
Description=smartctl exporter service
After=network-online.target

[Service]
Type=simple
PIDFile=/run/smartctl_exporter.pid
ExecStart={{ smart_exporter.bin_path }}
User=root
Group=root
SyslogIdentifier=smartctl_exporter
Restart=on-failure
RemainAfterExit=no
RestartSec=100ms
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target

View File

@@ -1,15 +0,0 @@
- name: Create snmp-exporter container
  docker_container:
    image: prom/snmp-exporter
    name: "{{ snmp_exporter_host }}"
    restart_policy: unless-stopped
    networks:
      - name: compose_net
    env:
      PUID: "{{ puid }}"
      PGID: "{{ pgid }}"
      TZ: "{{ timezone }}"
    volumes:
      - "{{ snmp_exporter_config }}:/etc/snmp_exporter/"
    ports:
      - "{{ snmp_exporter_port }}:9116"

View File

@@ -1,3 +0,0 @@
---
- include_tasks: setup.yml
- include_tasks: docker.yml

View File

@@ -1,20 +0,0 @@
---
- name: Create snmp_exporter directories
file:
path: "{{ item }}"
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
- "{{ snmp_exporter_config }}"
become: true
- name: Copy snmp_exporter config
template:
owner: "{{ puid }}"
group: "{{ pgid }}"
src: "snmp.yml.j2"
dest: "{{ snmp_exporter_config }}/snmp.yml"
mode: '644'
become: true

File diff suppressed because it is too large