Added soft-serve

Added grafana/prometheus (not working yet)

Signed-off-by: TuDatTr <tuan-dat.tran@tudattr.dev>
pull/1/head
TuDatTr 2022-12-21 22:28:46 +01:00
parent 2ba4259dd6
commit 1fdeaf2ded
9 changed files with 121 additions and 15 deletions

View File

@ -138,8 +138,18 @@ used ipv4:
interface: wg_tunnel01
## Troubleshooting
When there are problems with Docker: `docker system prune -a`
### Docker networking problem
`docker system prune -a`
### Time problems (NTP service: n/a)
`systemctl status systemd-timesyncd.service`
If the service is not available:
`sudo apt install systemd-timesyncd/stable`
### Syncthing inotify
`echo "fs.inotify.max_user_watches=204800" | sudo tee -a /etc/sysctl.conf`
https://forum.cloudron.io/topic/7163/how-to-increase-inotify-limit-for-syncthing/2
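To apply the new limit without a reboot, reload the sysctl settings afterwards: `sudo sysctl --system`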
## Todo
- Role to load customizations/configurations from backups onto the servers
- Split docker containers into separate compose files or tag them
- Syncthing and Grafana configuration on the hosts
- Get grafana/prometheus working

View File

@ -9,5 +9,3 @@
- backup
- role: power_management
- role: docker
tags:
- reload_compose

View File

@ -36,6 +36,7 @@ common_packages:
- curl
- tree
- rsync
- systemd-timesyncd
#
# Docker
@ -65,6 +66,11 @@ grafana_data: "{{docker_data_dir}}/grafana/"
grafana_log: "{{docker_dir}}/grafana/logs/"
grafana_config: "{{docker_dir}}/grafana/config/"
prometheus_data: "{{docker_data_dir}}/prometheus/"
prometheus_config: "{{docker_dir}}/prometheus/config"
softserve_data: "{{docker_dir}}/softserve/data"
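The grafana/prometheus/soft-serve paths above build on `docker_dir` and `docker_data_dir`, and the tasks and templates further down rely on `puid`/`pgid` for ownership; none of these are defined in this diff. A minimal sketch of how they might look (every value here is an assumption for illustration, not taken from this repo):
# hypothetical base variables, defined elsewhere in the real vars file
docker_dir: "/opt/docker"            # compose files and configs (assumed path)
docker_data_dir: "/opt/docker-data"  # persistent container data (assumed path)
puid: 1000                           # unprivileged container user (assumed, matches the old hardcoded 1000)
pgid: 1000                           # matching group (assumed)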
#
# pi
#

View File

@ -11,7 +11,7 @@
loop: "{{ backblaze_paths | dict2items | subelements('value') }}"
become: true
- name: Shut down docker
- name: Restart docker
shell:
cmd: "docker compose down --remove-orphans"
cmd: "docker compose up -d"
chdir: "{{ docker_compose_dir }}"
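The old task took the compose stack down, while the renamed one only brings it back up with `docker compose up -d`, so the stack is presumably shut down earlier in the play. If a single self-contained restart task is preferred, a minimal sketch (assuming the same `docker_compose_dir` variable and shell-module form used above) could chain both steps:
- name: Restart docker compose stack
  shell:
    cmd: "docker compose down --remove-orphans && docker compose up -d"
    chdir: "{{ docker_compose_dir }}"
  become: true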

View File

@ -10,6 +10,7 @@
file:
path: "{{ item }}"
owner: 911
group: 911
mode: '700'
state: directory
loop:
@ -20,6 +21,7 @@
file:
path: "{{ item }}"
owner: 911
group: 911
mode: '755'
state: directory
loop:
@ -29,17 +31,26 @@
- name: Create syncthing directory
file:
path: "{{ item }}"
owner: 1000
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
- "{{ syncthing_data }}"
become: true
- name: Resolve inotify error for syncthing
template:
src: "templates/aya01/syncthing/syncthing.conf"
dest: "/etc/sysctl.d/syncthing.conf"
mode: "660"
become: true
- name: Create grafana data directory
file:
path: "{{ item }}"
owner: 1000
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
@ -48,6 +59,25 @@
- "{{ grafana_config }}"
become: true
- name: Copy grafana config
template:
owner: "{{ puid }}"
src: "templates/aya01/grafana/etc-grafana/grafana.ini"
dest: "{{ grafana_config }}/grafana.ini"
mode: '660'
become: true
- name: Create soft-serve directory
file:
path: "{{ item }}"
owner: "{{ puid }}"
group: "{{ pgid }}"
mode: '755'
state: directory
loop:
- "{{ softserve_data }}"
become: true
# Todo, check if docker compose is running
# - name: Shut down docker
# shell:
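The commented-out TODO asks for a check whether the compose stack is running before shutting it down. One way to sketch that (assuming `docker_compose_dir` points at the compose project; this is not part of the commit) is to register the running service IDs and gate the follow-up task on them:
# hypothetical pre-check, not part of this commit
- name: Check for running compose services
  shell:
    cmd: "docker compose ps --quiet"
    chdir: "{{ docker_compose_dir }}"
  register: compose_ps
  changed_when: false
  become: true
- name: Shut down docker
  shell:
    cmd: "docker compose down --remove-orphans"
    chdir: "{{ docker_compose_dir }}"
  when: compose_ps.stdout | length > 0
  become: true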

View File

@ -16,7 +16,6 @@ services:
- "MAX_LOG_SIZE_BYTES=1000000"
- "MAX_LOG_NUMBER=20"
- "TZ=Europe/Berlin"
zoneminder:
image: ghcr.io/zoneminder-containers/zoneminder-base:latest
restart: always
@ -45,6 +44,7 @@ services:
- "MAX_LOG_SIZE_BYTES=1000000"
- "MAX_LOG_NUMBER=20"
- "TZ=Europe/Berlin"
pihole:
container_name: pihole
image: pihole/pihole:latest
@ -76,6 +76,7 @@ services:
- "traefik.http.routers.pihole.rule=Host(`pihole.{{local_domain}}`)"
- "traefik.http.routers.pihole.entrypoints=web"
- "traefik.http.services.pihole.loadbalancer.server.port=8089"
syncthing:
image: syncthing/syncthing
container_name: syncthing
@ -83,8 +84,8 @@ services:
networks:
- net
environment:
- PUID=1000
- PGID=1000
- "PUID={{ puid }}"
- "PGID={{ pgid }}"
volumes:
- "{{syncthing_data}}:/var/syncthing"
ports:
@ -93,23 +94,29 @@ services:
- 22000:22000/udp # QUIC file transfers
- 21027:21027/udp # Receive local discovery broadcasts
restart: unless-stopped
grafana:
image: grafana/grafana-oss
container_name: grafana
hostname: grafana
user: "{{ puid }}:{{ pgid }}"
networks:
- net
environment:
- "PUID={{ puid }}"
- "PGID={{ pgid }}"
- "GF_LOG_MODE=console file"
volumes:
- "{{ grafana_data }}:/var/lib/grafana/"
- "{{ grafana_log }}:/var/log/grafana/"
- "{{ grafana_config }}:/etc/grafana/"
ports:
- 3000:3000
soft-serve:
image: charmcli/soft-serve:latest
container_name: soft-serve
volumes:
- "{{ softserve_data }}:/soft-serve"
ports:
- 23231:23231
restart: unless-stopped
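Grafana is wired up above, but no prometheus service appears in this compose file yet, which matches the "not working yet" note in the commit message. A minimal sketch of such a service (image, port and container paths are the stock Prometheus defaults, not taken from this repo; `prometheus_config` and `prometheus_data` come from the vars file above):
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    hostname: prometheus
    networks:
      - net
    volumes:
      - "{{ prometheus_config }}:/etc/prometheus/"
      - "{{ prometheus_data }}:/prometheus"
    ports:
      - 9090:9090
    restart: unless-stopped
The official image runs as user nobody, so the prometheus_data directory may need its ownership adjusted, similar to the grafana data directory handled in the tasks above.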
networks:
zoneminder:

View File

@ -0,0 +1,54 @@
# Sample config for Prometheus.

global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'Mikrotik'

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 10 seconds.
    scrape_interval: 10s
    scrape_timeout: 10s
    tls_config:
      insecure_skip_verify: true
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    #static_configs:
    #- targets: ['localhost:9090']

  - job_name: Mikrotik
    static_configs:
      - targets:
          - {{ mikrotik_ip }} # mikrotik_ip
    metrics_path: /snmp
    params:
      module: [mikrotik]
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: mk_snmp_exporter:9116 # The SNMP exporter's real hostname:port.
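The Mikrotik job rewrites `__address__` to `mk_snmp_exporter:9116`, but that exporter is not defined anywhere in this commit. A minimal sketch of a matching compose service (image and config path follow the upstream prom/snmp-exporter defaults; the snmp.yml containing a `mikrotik` module has to be generated separately and is assumed to live next to the Prometheus config):
  mk_snmp_exporter:
    image: prom/snmp-exporter:latest
    container_name: mk_snmp_exporter
    networks:
      - net
    volumes:
      - "{{ prometheus_config }}/snmp.yml:/etc/snmp_exporter/snmp.yml"  # assumed location of the generated snmp.yml
    ports:
      - 9116:9116
    restart: unless-stopped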

View File

@ -0,0 +1 @@
fs.inotify.max_user_watches=204800