Added soft-serve
Added grafana/prometheus (not working yet)

Signed-off-by: TuDatTr <tuan-dat.tran@tudattr.dev>
parent 2ba4259dd6
commit 1fdeaf2ded

README.md: 12 changed lines
@@ -138,8 +138,18 @@ used ipv4:
   interface: wg_tunnel01

 ## troubleshooting
-when problems with docker: `docker system prune -a`
+### Docker networking problem
+`docker system prune -a`
+### Time problems (NTP service: n/a)
+systemctl status systemd-timesyncd.service
+when not available
+sudo apt install systemd-timesyncd/stable
+### Syncthing inotify
+echo "fs.inotify.max_user_watches=204800" | sudo tee -a /etc/sysctl.conf
+https://forum.cloudron.io/topic/7163/how-to-increase-inotify-limit-for-syncthing/2

 ## Todo
 - Role to load customization/configurations from backup to servers
 - split docker containers to different composes or tag
+- syncthing and grafana config on hosts
+- grafana/prometheus

@@ -9,5 +9,3 @@
 - backup
 - role: power_management
 - role: docker
-  tags:
-    - reload_compose

@@ -36,6 +36,7 @@ common_packages:
   - curl
   - tree
   - rsync
+  - systemd-timesyncd

 #
 # Docker

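Whether systemd-timesyncd is enabled and running after the package install depends on the distribution's packaging; an explicit task makes it deterministic. A sketch of such a follow-up task (task name and placement are assumptions, not part of this commit):

```yaml
# Sketch, not part of this commit: make sure the freshly installed
# systemd-timesyncd service is enabled and running.
- name: Enable and start systemd-timesyncd
  ansible.builtin.systemd:
    name: systemd-timesyncd
    state: started
    enabled: true
  become: true
```
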
@@ -65,6 +66,11 @@ grafana_data: "{{docker_data_dir}}/grafana/"
 grafana_log: "{{docker_dir}}/grafana/logs/"
 grafana_config: "{{docker_dir}}/grafana/config/"
+
+prometheus_data: "{{docker_data_dir}}/prometheus/"
+prometheus_config: "{{docker_dir}}/prometheus/config"
+
+softserve_data: "{{docker_dir}}/softserve/data"

 #
 # pi
 #

@@ -11,7 +11,7 @@
   loop: "{{ backblaze_paths | dict2items | subelements('value') }}"
   become: true

-- name: Shut down docker
+- name: Restart docker
   shell:
-    cmd: "docker compose down --remove-orphans"
+    cmd: "docker compose up -d"
     chdir: "{{ docker_compose_dir }}"

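The renamed task now only runs `docker compose up -d`, so it "restarts" the stack only in combination with whatever brought it down earlier. If the community.docker collection is available on the controller, the same step could be expressed without shelling out; a sketch under that assumption:

```yaml
# Sketch, assuming the community.docker collection and the Compose v2 CLI
# are available: idempotently (re)create the compose project.
- name: Restart docker
  community.docker.docker_compose_v2:
    project_src: "{{ docker_compose_dir }}"
    state: present
  become: true
```
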
@@ -10,6 +10,7 @@
   file:
     path: "{{ item }}"
     owner: 911
+    group: 911
     mode: '700'
     state: directory
   loop:

@@ -20,6 +21,7 @@
   file:
     path: "{{ item }}"
     owner: 911
+    group: 911
     mode: '755'
     state: directory
   loop:

@@ -29,17 +31,26 @@
 - name: Create syncthing directory
   file:
     path: "{{ item }}"
-    owner: 1000
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
     mode: '755'
     state: directory
   loop:
     - "{{ syncthing_data }}"
   become: true

+- name: Resolve inotify error for syncthing
+  template:
+    src: "templates/aya01/syncthing/syncthing.conf"
+    dest: "/etc/sysctl.d/syncthing.conf"
+    mode: "660"
+  become: true
+
 - name: Create grafana data directory
   file:
     path: "{{ item }}"
-    owner: 1000
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
     mode: '755'
     state: directory
   loop:

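Writing the drop-in to /etc/sysctl.d/ does not apply the new limit until the next boot or an explicit reload, and mode "660" is unusually tight for a sysctl drop-in (0644 is conventional). Assuming the ansible.posix collection is available, the setting could be persisted and loaded in one step:

```yaml
# Sketch, assuming the ansible.posix collection: persist the inotify limit
# in the same drop-in file and reload it immediately.
- name: Raise inotify watch limit for syncthing
  ansible.posix.sysctl:
    name: fs.inotify.max_user_watches
    value: "204800"
    sysctl_file: /etc/sysctl.d/syncthing.conf
    state: present
    reload: true
  become: true
```
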
@@ -48,6 +59,25 @@
     - "{{ grafana_config }}"
   become: true

+- name: Copy grafana config
+  template:
+    owner: "{{ puid }}"
+    src: "templates/aya01/grafana/etc-grafana/grafana.ini"
+    dest: "{{ grafana_config }}/grafana.ini"
+    mode: '660'
+  become: true
+
+- name: Create soft-serve directory
+  file:
+    path: "{{ item }}"
+    owner: "{{ puid }}"
+    group: "{{ pgid }}"
+    mode: '755'
+    state: directory
+  loop:
+    - "{{ softserve_data }}"
+  become: true
+
 # Todo, check if docker compose is running
 # - name: Shut down docker
 #   shell:

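The directory-creation tasks in this file all share the same shape and differ only in path, owner, group, and mode, so they could be collapsed into a single loop. A purely illustrative sketch of that refactor (not part of this commit):

```yaml
# Sketch, not part of this commit: one task covering the per-service
# "Create ... directory" tasks by looping over small dicts.
- name: Create service directories
  file:
    path: "{{ item.path }}"
    owner: "{{ item.owner }}"
    group: "{{ item.group }}"
    mode: "{{ item.mode }}"
    state: directory
  loop:
    - { path: "{{ syncthing_data }}", owner: "{{ puid }}", group: "{{ pgid }}", mode: "0755" }
    - { path: "{{ grafana_config }}", owner: "{{ puid }}", group: "{{ pgid }}", mode: "0755" }
    - { path: "{{ softserve_data }}", owner: "{{ puid }}", group: "{{ pgid }}", mode: "0755" }
  become: true
```
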
@@ -16,7 +16,6 @@ services:
       - "MAX_LOG_SIZE_BYTES=1000000"
       - "MAX_LOG_NUMBER=20"
       - "TZ=Europe/Berlin"
-
   zoneminder:
     image: ghcr.io/zoneminder-containers/zoneminder-base:latest
     restart: always

@@ -45,6 +44,7 @@ services:
       - "MAX_LOG_SIZE_BYTES=1000000"
       - "MAX_LOG_NUMBER=20"
       - "TZ=Europe/Berlin"
+
   pihole:
     container_name: pihole
     image: pihole/pihole:latest

@@ -76,6 +76,7 @@ services:
       - "traefik.http.routers.pihole.rule=Host(`pihole.{{local_domain}}`)"
       - "traefik.http.routers.pihole.entrypoints=web"
      - "traefik.http.services.pihole.loadbalancer.server.port=8089"
+
   syncthing:
     image: syncthing/syncthing
     container_name: syncthing

@@ -83,8 +84,8 @@ services:
     networks:
       - net
     environment:
-      - PUID=1000
-      - PGID=1000
+      - "PUID={{ puid }}"
+      - "PGID={{ pgid }}"
     volumes:
       - "{{syncthing_data}}:/var/syncthing"
     ports:

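The hard-coded 1000 values are replaced with {{ puid }}/{{ pgid }} here and in the tasks above, so both variables have to be defined somewhere in the inventory. A minimal sketch of what that might look like (the location is an assumption; this commit does not show it):

```yaml
# Sketch, location assumed (e.g. group_vars): the host user/group that
# should own the bind-mounted data directories.
puid: 1000
pgid: 1000
```
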
@@ -93,23 +94,29 @@ services:
       - 22000:22000/udp # QUIC file transfers
       - 21027:21027/udp # Receive local discovery broadcasts
     restart: unless-stopped

   grafana:
     image: grafana/grafana-oss
     container_name: grafana
     hostname: grafana
+    user: "{{ puid }}:{{ pgid }}"
     networks:
       - net
     environment:
-      - "PUID={{ puid }}"
-      - "PGID={{ pgid }}"
       - "GF_LOG_MODE=console file"
     volumes:
       - "{{ grafana_data }}:/var/lib/grafana/"
       - "{{ grafana_log }}:/var/log/grafana/"
-      - "{{ grafana_config }}:/etc/grafana/"
     ports:
       - 3000:3000
+  soft-serve:
+    image: charmcli/soft-serve:latest
+    container_name: soft-serve
+    volumes:
+      - "{{ softserve_data }}:/soft-serve"
+    ports:
+      - 23231:23231
+    restart: unless-stopped

 networks:
   zoneminder:

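The commit message marks grafana/prometheus as not working yet, and indeed the new prometheus_data/prometheus_config variables and the Prometheus configuration below have no corresponding service in this compose file. A service block in the same style might look like this (image tag, mount points, and port are assumptions, not part of this commit):

```yaml
# Sketch, not part of this commit: a Prometheus service matching the new
# variables; it would sit under the existing top-level `services:` key.
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    networks:
      - net
    volumes:
      - "{{ prometheus_config }}/prometheus.yml:/etc/prometheus/prometheus.yml"
      - "{{ prometheus_data }}:/prometheus"
    ports:
      - 9090:9090
    restart: unless-stopped
```

For the new soft-serve service, the published port is soft-serve's SSH listener, so the server is reached with something like `ssh -p 23231 <host>` once the container is up.
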
@@ -0,0 +1,54 @@
+# Sample config for Prometheus.
+
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+    monitor: 'Mikrotik'
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets: ['localhost:9093']
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # Override the global default and scrape targets from this job every 5 seconds.
+    scrape_interval: 10s
+    scrape_timeout: 10s
+    tls_config:
+      insecure_skip_verify: true
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    #static_configs:
+    #- targets: ['localhost:9090']
+
+  - job_name: Mikrotik
+    static_configs:
+      - targets:
+          - {{ mikrotik_ip }} # mikrotik_ip
+    metrics_path: /snmp
+    params:
+      module: [mikrotik]
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+      - source_labels: [__param_target]
+        target_label: instance
+      - target_label: __address__
+        replacement: mk_snmp_exporter:9116 # The SNMP exporter's real hostname:port.

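Nothing in this commit deploys this file into {{ prometheus_config }}, which fits the "not working yet" note in the commit message. A deployment task in the same style as the grafana config copy could look like this (the src path is a hypothetical placeholder):

```yaml
# Sketch, hypothetical src path: template the Prometheus config next to
# the other service configs managed by this role.
- name: Copy prometheus config
  template:
    src: "templates/aya01/prometheus/prometheus.yml"
    dest: "{{ prometheus_config }}/prometheus.yml"
    owner: "{{ puid }}"
    mode: '0644'
  become: true
```

The Mikrotik job also relies on an SNMP exporter being reachable as mk_snmp_exporter:9116 on the compose network, and the finished file can be syntax-checked with `promtool check config`.
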
@@ -0,0 +1 @@
+fs.inotify.max_user_watches=204800