Compare commits
54 Commits
k3s
...
cb8ccd8f00
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cb8ccd8f00 | ||
|
|
02168225b1 | ||
|
|
6ff1ccecd0 | ||
|
|
de62327fde | ||
|
|
b70c8408dc | ||
|
|
a913e1cbc0 | ||
|
|
e3c67a32e9 | ||
|
|
8f2998abc0 | ||
|
|
7fcee3912f | ||
|
|
591342f580 | ||
|
|
f2ea03bc01 | ||
|
|
0e8e07ed3e | ||
|
|
a2a58f6343 | ||
|
|
42196a32dc | ||
|
|
6934a9f5fc | ||
|
|
27621aac03 | ||
|
|
56f058c254 | ||
|
|
924e4a2f92 | ||
|
|
060e2425ff | ||
|
|
f2d489f63a | ||
|
|
4aa3e711c9 | ||
|
|
00e4f4807d | ||
|
|
161e6446cd | ||
|
|
ae929ca09d | ||
|
|
1017fed848 | ||
|
|
cb256e9451 | ||
|
|
6bc591550c | ||
|
|
e68d534e4f | ||
|
|
1a1b8cb69c | ||
|
|
88141f8869 | ||
|
|
6d099061ac | ||
|
|
711dc58f2e | ||
|
|
5aaf3eef53 | ||
|
|
33253e934d | ||
|
|
4db26b56da | ||
|
|
ce0411cdb0 | ||
|
|
28d946cae5 | ||
|
|
5d0f56ce38 | ||
|
|
0c1a8a95f2 | ||
|
|
05c35a546a | ||
|
|
d16cc0db06 | ||
|
|
2ae0f4863e | ||
|
|
7d58de98d9 | ||
|
|
92e4b3bb27 | ||
|
|
ed980f816f | ||
|
|
c0e81ee277 | ||
|
|
a09448985c | ||
|
|
95afa201e3 | ||
|
|
000375c7ba | ||
|
|
2cc4fd0be0 | ||
|
|
8fb4eaf610 | ||
|
|
3aa56be025 | ||
|
|
51a49d003d | ||
|
|
50abbf933c |
31
.ansible-lint
Normal file
31
.ansible-lint
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
# .ansible-lint
|
||||
|
||||
# Specify exclude paths to prevent linting vendor roles, etc.
|
||||
exclude_paths:
|
||||
- ./.git/
|
||||
- ./.venv/
|
||||
- ./galaxy_roles/
|
||||
|
||||
# A list of rules to skip. This is a more modern and readable alternative to 'skip_list'.
|
||||
skip_list:
|
||||
- experimental
|
||||
- fqcn-builtins
|
||||
- no-handler
|
||||
- var-naming
|
||||
|
||||
# Enforce certain rules that are not enabled by default.
|
||||
enable_list:
|
||||
- no-free-form
|
||||
- var-spacing
|
||||
- no-log-password
|
||||
- no-relative-path
|
||||
- command-instead-of-module
|
||||
- fqcn[deep]
|
||||
- no-changed-when
|
||||
|
||||
# Offline mode disables any features that require internet access.
|
||||
offline: true
|
||||
|
||||
# Set the desired verbosity level.
|
||||
verbosity: 1
|
||||
17
.editorconfig
Normal file
17
.editorconfig
Normal file
@@ -0,0 +1,17 @@
|
||||
root = true
|
||||
|
||||
[*]
|
||||
indent_style = space
|
||||
end_of_line = lf
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.{yml,yaml}]
|
||||
indent_size = 2
|
||||
|
||||
[*.py]
|
||||
indent_size = 4
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,2 +0,0 @@
|
||||
/secrets.yml
|
||||
*.ovpn
|
||||
|
||||
@@ -1,207 +0,0 @@
|
||||
<mxfile host="app.diagrams.net" modified="2023-11-05T13:55:54.105Z" agent="Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0" etag="qKRITLw66apjhZnPW2mG" version="21.6.2" pages="2">
|
||||
<diagram id="JSIfkQgaAO27B-iO4uI6" name="Homelab Overview">
|
||||
<mxGraphModel dx="2924" dy="1194" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="z4CzeoHyWsNDpYlZFiTu-73" target="z4CzeoHyWsNDpYlZFiTu-27">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="-500" y="530" as="targetPoint" />
|
||||
<Array as="points">
|
||||
<mxPoint x="10" y="320" />
|
||||
<mxPoint x="-515" y="320" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-66" value="192.168.20.1/24" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="z4CzeoHyWsNDpYlZFiTu-54">
|
||||
<mxGeometry x="-0.3363" y="1" relative="1" as="geometry">
|
||||
<mxPoint as="offset" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-55" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="z4CzeoHyWsNDpYlZFiTu-73" target="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="180" y="290" as="sourcePoint" />
|
||||
<Array as="points">
|
||||
<mxPoint x="105" y="360" />
|
||||
<mxPoint x="-20" y="360" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-65" value="192.168.30.1/24" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="z4CzeoHyWsNDpYlZFiTu-55">
|
||||
<mxGeometry x="-0.1082" y="1" relative="1" as="geometry">
|
||||
<mxPoint x="52" as="offset" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-56" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="z4CzeoHyWsNDpYlZFiTu-73" target="z4CzeoHyWsNDpYlZFiTu-41">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<Array as="points">
|
||||
<mxPoint x="295" y="360" />
|
||||
<mxPoint x="420" y="360" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-67" value="192.168.40.1/24" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="z4CzeoHyWsNDpYlZFiTu-56">
|
||||
<mxGeometry x="-0.1475" y="-2" relative="1" as="geometry">
|
||||
<mxPoint x="-33" as="offset" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="z4CzeoHyWsNDpYlZFiTu-73" target="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<Array as="points">
|
||||
<mxPoint x="390" y="320" />
|
||||
<mxPoint x="820" y="320" />
|
||||
</Array>
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-68" value="192.168.50.1/24" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="z4CzeoHyWsNDpYlZFiTu-57">
|
||||
<mxGeometry x="-0.2384" y="-3" relative="1" as="geometry">
|
||||
<mxPoint as="offset" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-27" value="Homelab VLAN20" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="-750" y="600" width="470" height="400" as="geometry">
|
||||
<mxRectangle x="-750" y="600" width="140" height="30" as="alternateBounds" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-90" value="<div>aya01.seyshiro.de</div><div>192.168.20.12</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server_storage;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-27">
|
||||
<mxGeometry x="20" y="40" width="105" height="105" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-19" value="<div>pi.seyshiro.de</div><div>192.168.20.11<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-27">
|
||||
<mxGeometry x="250" y="40" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-17" value="<div>inko.seyshiro.de</div><div>192.168.20.14<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-27">
|
||||
<mxGeometry x="140" y="40" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-20" value="<div>naruto.seyshiro.de</div><div>192.168.20.13<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-27">
|
||||
<mxGeometry x="360" y="40" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-35" value="User VLAN30" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="-200" y="600" width="360" height="400" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-28" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.tablet;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry x="50" y="50" width="100" height="70" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-8" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.pc;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry x="100" y="140" width="100" height="70" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-33" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.mobile;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry x="250" y="70" width="50" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-36" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.video_projector;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry x="220" y="210" width="100" height="35" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-46" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.laptop;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-35">
|
||||
<mxGeometry x="50" y="260" width="100" height="55" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-39" value="IoT VLAN50" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="680" y="600" width="280" height="460" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-52" value="<div>Brother MFC-L2710DW</div><div>192.168.50.219</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.copier;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry x="20" y="35" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-51" value="<div>Brother QL-820NWB</div><div>192.168.50.218</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.copier;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry x="150" y="35" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-60" value="Lightbulbs" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.comm_link;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry x="50" y="190" width="40" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-62" value="Shelly Power Outlet" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.comm_link;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry x="180" y="190" width="40" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-81" value="BirbCam" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.security_camera;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-39">
|
||||
<mxGeometry x="30" y="330" width="100" height="75" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-53" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="z4CzeoHyWsNDpYlZFiTu-40" target="z4CzeoHyWsNDpYlZFiTu-73">
|
||||
<mxGeometry relative="1" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-69" value="192.168.200.1/32" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="z4CzeoHyWsNDpYlZFiTu-53">
|
||||
<mxGeometry x="-0.3672" relative="1" as="geometry">
|
||||
<mxPoint as="offset" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-40" value="netcup VPS" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="-290" y="40" width="150" height="220" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-38" value="<div>mii.seyshiro.de</div><div>tudattr.dev<br></div><div>192.168.200.2<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.proxy_server;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-40">
|
||||
<mxGeometry x="20" y="50" width="105" height="105" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-41" value="Guest VLAN40" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="240" y="600" width="360" height="280" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-44" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.mobile;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-41">
|
||||
<mxGeometry x="250" y="70" width="50" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-47" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.tablet;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-41">
|
||||
<mxGeometry x="40" y="50" width="100" height="70" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-48" value="" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.laptop;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-41">
|
||||
<mxGeometry x="90" y="160" width="100" height="55" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-73" value="<div>Network Backbone&nbsp;</div><div>(Management VLAN 70)</div>" style="swimlane;whiteSpace=wrap;html=1;startSize=40;" vertex="1" parent="1">
|
||||
<mxGeometry x="10" y="40" width="380" height="220" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-10" value="<div>Mikrotik CRS 326</div><div>192.168.70.1<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.router;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-73">
|
||||
<mxGeometry x="60" y="85" width="100" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-70" value="<div>TP-Link EAP 225</div><div>192.168.70.250</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.wireless_modem;" vertex="1" parent="z4CzeoHyWsNDpYlZFiTu-73">
|
||||
<mxGeometry x="260" y="57.5" width="100" height="85" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z4CzeoHyWsNDpYlZFiTu-71" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;endArrow=none;endFill=0;" edge="1" parent="z4CzeoHyWsNDpYlZFiTu-73" source="z4CzeoHyWsNDpYlZFiTu-10" target="z4CzeoHyWsNDpYlZFiTu-70">
|
||||
<mxGeometry relative="1" as="geometry">
|
||||
<mxPoint x="30" y="142.5" as="sourcePoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
<diagram id="2pU-qBdMS-FfD6IS7qYU" name="VLAN View">
|
||||
<mxGraphModel dx="2440" dy="1405" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-28" value="netcup VPS" style="swimlane;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="480" y="20" width="150" height="220" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-29" value="<div>mii.seyshiro.de</div><div>tudattr.dev<br></div><div>192.168.200.2<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.proxy_server;" vertex="1" parent="7z5INb6uvPQJT5LWZGVQ-28">
|
||||
<mxGeometry x="20" y="50" width="105" height="105" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-34" value="<div>Network Backbone&nbsp;</div><div>(Management VLAN 70)</div>" style="swimlane;whiteSpace=wrap;html=1;startSize=40;" vertex="1" parent="1">
|
||||
<mxGeometry x="780" y="20" width="380" height="220" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-36" value="<div>TP-Link EAP 225</div><div>192.168.70.250</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.wireless_modem;" vertex="1" parent="7z5INb6uvPQJT5LWZGVQ-34">
|
||||
<mxGeometry x="260" y="57.5" width="100" height="85" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-35" value="<div>Mikrotik CRS 326</div><div>192.168.70.1<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.router;" vertex="1" parent="7z5INb6uvPQJT5LWZGVQ-34">
|
||||
<mxGeometry x="60" y="100" width="100" height="30" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-13" value="<div>naruto.seyshiro.de</div><div>192.168.20.13<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="1">
|
||||
<mxGeometry x="420" y="370" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-11" value="<div>pi.seyshiro.de</div><div>192.168.20.11<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="1">
|
||||
<mxGeometry x="310" y="370" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-12" value="<div>inko.seyshiro.de</div><div>192.168.20.14<br></div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server;" vertex="1" parent="1">
|
||||
<mxGeometry x="200" y="370" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-10" value="<div>aya01.seyshiro.de</div><div>192.168.20.12</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.server_storage;" vertex="1" parent="1">
|
||||
<mxGeometry x="80" y="370" width="105" height="105" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-21" value="<div>Brother MFC-L2710DW</div><div>192.168.50.219</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.copier;" vertex="1" parent="1">
|
||||
<mxGeometry x="1330" y="160" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-22" value="<div>Brother QL-820NWB</div><div>192.168.50.218</div>" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.copier;" vertex="1" parent="1">
|
||||
<mxGeometry x="1460" y="160" width="100" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-23" value="Lightbulbs" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.comm_link;" vertex="1" parent="1">
|
||||
<mxGeometry x="1360" y="315" width="40" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-24" value="Shelly Power Outlet" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.comm_link;" vertex="1" parent="1">
|
||||
<mxGeometry x="1490" y="315" width="40" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="7z5INb6uvPQJT5LWZGVQ-25" value="BirbCam" style="fontColor=#0066CC;verticalAlign=top;verticalLabelPosition=bottom;labelPosition=center;align=center;html=1;outlineConnect=0;fillColor=#CCCCCC;strokeColor=#6881B3;gradientColor=none;gradientDirection=north;strokeWidth=2;shape=mxgraph.networks.security_camera;" vertex="1" parent="1">
|
||||
<mxGeometry x="1340" y="455" width="100" height="75" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
||||
Binary file not shown.
282
README.md
282
README.md
@@ -1,227 +1,87 @@
|
||||
# TuDatTr IaC
|
||||
|
||||
## User
|
||||
It is expected that a user with sudo privilages is on the target, for me the users name is "tudattr"
|
||||
you can add such user with the following command `useradd -m -g sudo -s /bin/bash tudattr`
|
||||
Don't forget to set a password for the new user with `passwd tudattr`
|
||||
## sudo
|
||||
Install sudo on the target machine, with debian its
|
||||
**I do not recommend this project being used for ones own infrastructure, as
|
||||
this project is heavily attuned to my specific host/network setup**
|
||||
The Ansible Project to provision fresh Debian VMs for my Proxmox instances.
|
||||
Some values are hard coded such as the public key both in
|
||||
[./scripts/debian_seed.sh](./scripts/debian_seed.sh) and [./group_vars/all/vars.yml](./group_vars/all/vars.yml).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [secrets.yml](secrets.yml) in the root directory of this repository.
|
||||
Skeleton file can be found as [./secrets.yml.skeleton](./secrets.yml.skeleton).
|
||||
- IP Configuration of hosts like in [./host_vars/\*](./host_vars/*)
|
||||
- Setup [~/.ssh/config](~/.ssh/config) for the respective hosts used.
|
||||
- Install `passlib` for your operating system. Needed to hash passwords ad-hoc.
|
||||
|
||||
## Improvable Variables
|
||||
|
||||
- `group_vars/k3s/vars.yml`:
|
||||
- `k3s.server.ips`: Take list of IPs from host_vars `k3s_server*.yml`.
|
||||
- `k3s_db_connection_string`: Embed this variable in the `k3s.db.`-directory.
|
||||
Currently causes loop.
|
||||
|
||||
## Run Playbook
|
||||
|
||||
To run a first playbook and test the setup the following command can be executed.
|
||||
|
||||
```sh
|
||||
su root
|
||||
apt install sudo
|
||||
usermod -a -G sudo tudattr
|
||||
ansible-playbook -i production -J k3s-servers.yml
|
||||
```
|
||||
|
||||
## Backups
|
||||
Backup for aya01 and raspberry are in a backblaze b2, which gets encrypted on the clientside by rclone.
|
||||
but first of all we need to create the buckets and provide ansible with the needed information.
|
||||
This will run the [./k3s-servers.yml](./k3s-servers.yml) playbook and execute
|
||||
its roles.
|
||||
|
||||
First we need to create a api key for backblaze, consists of an id and a key.
|
||||
we use clone to sync to backblaze.
|
||||
we can encrypt the data with rclone before sending it to backblaze.
|
||||
to do this we need two buckets:
|
||||
- b2
|
||||
- crypt
|
||||
on each device that should be backupped.
|
||||
## After successful k3s installation
|
||||
|
||||
we create these by running `rclone config` and creating one [remote] b2 config and a [secret] crypt config. The crypt config should have two passwords that we store in our secrets file.
|
||||
To access our Kubernetes cluster from our host machine to work on it via
|
||||
flux and such we need to manually copy a k3s config from one of our server nodes to our host machine.
|
||||
Then we need to install `kubectl` on our host machine and optionally `kubectx` if we're already
|
||||
managing other Kubernetes instances.
|
||||
Then we replace the localhost address inside of the config with the IP of our load balancer.
|
||||
Finally we'll need to set the KUBECONFIG variable.
|
||||
|
||||
`
|
||||
## Vault
|
||||
- Create vault with: `ansible-vault create secrets.yml`
|
||||
- Create entry in vault with: `ansible-vault edit secrets.yml`
|
||||
- Add following entries: TODO
|
||||
|
||||
## Docker
|
||||
To add new docker containers to the docker role you need to add the following and replace `service` with the name of your service:
|
||||
|
||||
- Add relevent vars to `group_vars/all/vars.yaml`:
|
||||
```yaml
|
||||
service:
|
||||
host: "service"
|
||||
ports:
|
||||
http: "19999"
|
||||
volumes:
|
||||
config: "{{ docker_dir }}/service/" # config folder or your dir
|
||||
data: "{{ docker_data_dir }}/service/" # data folder or your dir (only works on aya01)
|
||||
```
|
||||
|
||||
- Create necessary directories for service in the docker role `roles/docker/tasks/service.yaml`
|
||||
```yaml
|
||||
- name: Create service dirs
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: 1000
|
||||
group: 1000
|
||||
mode: '775'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ service.volumes.config }}"
|
||||
- "{{ service.volumes.data }}"
|
||||
|
||||
# optional:
|
||||
# - name: Place service config
|
||||
# template:
|
||||
# owner: 1000
|
||||
# mode: '660'
|
||||
# src: "templates/hostname/service/service.yml"
|
||||
# dest: "{{ prm_config }}/service.yml"
|
||||
```
|
||||
|
||||
- Includ new tasks to `roles/docker/tasks/hostname_compose.yaml`:
|
||||
```yaml
|
||||
- include_tasks: service.yaml
|
||||
tags:
|
||||
- service
|
||||
```
|
||||
|
||||
- Add new service to compose `roles/docker/templates/hostname/compose.yaml`
|
||||
```yaml
|
||||
service:
|
||||
image: service/service
|
||||
container_name: service
|
||||
hostname: service
|
||||
networks:
|
||||
- net
|
||||
ports:
|
||||
- "{{service_port}}:19999"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- "{{service_config}}:/etc/service"
|
||||
- "{{service_lib}}:/var/lib/service"
|
||||
- "{{service_cache}}:/var/cache/service"
|
||||
```
|
||||
|
||||
## Server
|
||||
- Install Debian (debian-11.5.0-amd64-netinst.iso) on remote system
|
||||
- Create user (tudattr)
|
||||
- Get IP of remote system (192.168.20.11)
|
||||
- Create ssh-config entry
|
||||
```config
|
||||
Host aya01
|
||||
HostName 192.168.20.11
|
||||
Port 22
|
||||
User tudattr
|
||||
IdentityFile /mnt/veracrypt1/genesis
|
||||
```
|
||||
- copy public key to remote system
|
||||
`ssh-copy-id -i /mnt/veracrypt1/genesis.pub aya01`
|
||||
- Add this host to ansible inventory
|
||||
- Install sudo on remote
|
||||
- add user to sudo group (with `su --login` without login the path will not be loaded correctly see [here](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=918754)) and `usermod -a -G sudo tudattr`
|
||||
- set time correctly when getting the following error
|
||||
```sh
|
||||
Release file for http://security.debian.org/debian-security/dists/bullseye-security/InRelease is not valid yet (invalid for another 12h 46min 9s). Updates for this repository will not be applied.
|
||||
mkdir ~/.kube/
|
||||
scp k3s-server00:/etc/rancher/k3s/k3s.yaml ~/.kube/config
|
||||
chown $USER ~/.kube/config
|
||||
sed -i "s/127.0.0.1/192.168.20.22/" ~/.kube/config
|
||||
export KUBECONFIG=~/.kube/config
|
||||
```
|
||||
By doing on remote system (example):
|
||||
|
||||
Install flux and continue in the flux repository.
|
||||
|
||||
## Longhorn Nodes
|
||||
|
||||
To create longhorn nodes from existing kubernetes nodes we want to increase
|
||||
their storage capacity. Since we're using VMs for our k3s nodes we can
|
||||
resize the root-disk of the VMs in the proxmox GUI.
|
||||
|
||||
Then we have to resize the partitions inside of the VM so the root partition
|
||||
uses the newly available space.
|
||||
When we have LVM-based root partition we can do the following:
|
||||
|
||||
```sh
|
||||
sudo systemctl stop ntp.service
|
||||
sudo ntpd -gq
|
||||
sudo systemctl start ntp.service
|
||||
```
|
||||
### zoneminder
|
||||
- Enable authentication in (Options->System)
|
||||
- Create new Camera:
|
||||
- General>Name: BirdCam
|
||||
  - General>Source Type: Ffmpeg
|
||||
- General>Function: Modect
|
||||
- Source>Source Path: `rtsp://user:pw@ip:554/cam/mpeg4`
|
||||
- Change default admin password
|
||||
- Create users
|
||||
|
||||
|
||||
|
||||
## RaspberryPi
|
||||
- Install raspbian lite (2022-09-22-raspios-bullseye-arm64-lite.img) on pi
|
||||
- Get IP of remote system (192.168.20.11)
|
||||
- Create ssh-config entry
|
||||
```config
|
||||
Host pi
|
||||
HostName 192.168.20.11
|
||||
Port 22
|
||||
User tudattr
|
||||
IdentityFile /mnt/veracrypt1/genesis
|
||||
```
|
||||
- enable ssh on pi
|
||||
- copy public key to pi
|
||||
- change user password of user on pi
|
||||
- execute `ansible-playbook -i production --ask-vault-pass --extra-vars '@secrets.yml' pi.yml`
|
||||
|
||||
## Mikrotik
|
||||
- Create rsa-key on your device and name it mikrotik_rsa
|
||||
- On mikrotik run: `/user/ssh-keys/import public-key-file=mikrotik_rsa.pub user=tudattr`
|
||||
- Create ssh-config entry:
|
||||
```config
|
||||
Host mikrotik
|
||||
HostName 192.168.70.1
|
||||
Port 2200
|
||||
User tudattr
|
||||
IdentityFile /mnt/veracrypt1/mikrotik_rsa
|
||||
# Create a new partition from the free space.
|
||||
sudo fdisk /dev/sda
|
||||
# echo "n\n\n\n\n\nw\n"
|
||||
# n > 5x\n > w > \n
|
||||
# Create a LVM volume on the new partition
|
||||
sudo pvcreate /dev/sda3
|
||||
sudo vgextend k3s-vg /dev/sda3
|
||||
# Use the newly available storage in the root volume
|
||||
sudo lvresize -l +100%FREE -r /dev/k3s-vg/root
|
||||
```
|
||||
|
||||
### wireguard
|
||||
thanks to [mikrotik](https://www.medo64.com/2022/04/wireguard-on-mikrotik-routeros-7/)
|
||||
quick code
|
||||
```
|
||||
# add wireguard interface
|
||||
interface/wireguard/add listen-port=51820 name=wg1
|
||||
# get public key
|
||||
interface/wireguard/print
|
||||
$ > public-key: <mikrotik_public_key>
|
||||
# add network/ip for wireguard interface
|
||||
ip/address/add address=192.168.200.1/24 network=192.168.200.0 interface=wg1
|
||||
# add firewall rule for wireguard (maybe specify to be from pppoe-wan)
|
||||
/ip/firewall/filter/add chain=input protocol=udp dst-port=51820 action=accept
|
||||
# routing for wg1 clients and rest of the network
|
||||
> <insert forward for routing between wg1 and other networks>
|
||||
# enable internet for wg1 clients (may have to add them to an "enable internet" address list)
|
||||
/ip/firewall/nat/add chain=srcnat src-address=192.168.200.0/24 out-interface=pppoe-wan action=masquerade
|
||||
```
|
||||
add peer
|
||||
```
|
||||
/interface/wireguard/peers/add interface=wg1 allowed-address=<untaken_ipv4>/24 public-key="<client_public_key>"
|
||||
```
|
||||
## Cloud Init VMs
|
||||
|
||||
Key generation on Arch Linux: `wg genkey | (umask 0077 && tee wireguard.key) | wg pubkey > peer_A.pub`
|
||||
Wireguard config on archlinux at `/etc/wireguard/wg0.conf`:
|
||||
```sh
|
||||
# On Hypervisor Host
|
||||
qm resize <vmid> scsi0 +32G
|
||||
# On VM
|
||||
sudo fdisk -l /dev/sda # To check
|
||||
echo 1 | sudo tee /sys/class/block/sda/device/rescan
|
||||
sudo fdisk -l /dev/sda # To check
|
||||
# sudo apt-get install cloud-guest-utils
|
||||
sudo growpart /dev/sda 1
|
||||
```
|
||||
[Interface]
|
||||
PrivateKey = <client_private_key>
|
||||
Address = 192.168.200.250/24
|
||||
|
||||
[Peer]
|
||||
PublicKey = <mikrotik public key>
|
||||
Endpoint = tudattr.dev:51820
|
||||
AllowedIPs = 0.0.0.0/0
|
||||
```
|
||||
used ipv4:
|
||||
- tudattr: 192.168.200.250
|
||||
- livei: 192.168.200.240
|
||||
|
||||
#### notes
|
||||
- wireguard->add
|
||||
name: wg_tunnel01
|
||||
listen port: 51820
|
||||
[save]
|
||||
- wireguard->peers->add
|
||||
interface: wg_tunnel01
|
||||
endpoint port: 51820
|
||||
allowed address: ::/0
|
||||
psk: <password>
|
||||
persistent keepalive: 25
|
||||
- ip->address->address list->add
|
||||
address:192.168.200.1/24
|
||||
network: 192.168.200.0
|
||||
interface: wg_tunnel01
|
||||
|
||||
## troubleshooting
|
||||
### Docker networking problem
|
||||
`docker system prune -a`
|
||||
### Time problems (NTP service: n/a)
|
||||
systemctl status systemd-timesyncd.service
|
||||
when not available
|
||||
sudo apt install systemd-timesyncd/stable
|
||||
### Syncthing inotify
|
||||
echo "fs.inotify.max_user_watches=204800" | sudo tee -a /etc/sysctl.conf
|
||||
https://forum.cloudron.io/topic/7163/how-to-increase-inotify-limit-for-syncthing/2
|
||||
|
||||
38
ansible.cfg
Normal file
38
ansible.cfg
Normal file
@@ -0,0 +1,38 @@
|
||||
[defaults]
|
||||
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
|
||||
interpreter_python=python3
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Roles.
|
||||
roles_path=./roles
|
||||
|
||||
# (pathlist) Comma separated list of Ansible inventory sources
|
||||
inventory=./production.ini
|
||||
|
||||
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
|
||||
# If executable, it will be run and the resulting stdout will be used as the password.
|
||||
vault_password_file=/media/veracrypt1/scripts/ansible_vault.sh
|
||||
|
||||
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
|
||||
# This affects vars_files, include_vars, inventory and vars plugins among others.
|
||||
yaml_valid_extensions=.yml
|
||||
|
||||
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host
|
||||
host_key_checking=False
|
||||
|
||||
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
|
||||
;retry_files_enabled=False
|
||||
|
||||
# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
|
||||
# This file will be overwritten after each run with the list of failed hosts from all plays.
|
||||
;retry_files_save_path=
|
||||
|
||||
# (list) Allows to change the group variable precedence merge order.
|
||||
;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play
|
||||
|
||||
[colors]
|
||||
# (string) Defines the color to use when showing 'Skipped' task status
|
||||
skip=dark gray
|
||||
|
||||
[tags]
|
||||
# (list) default list of tags to skip in your plays, has precedence over Run Tags
|
||||
;skip=
|
||||
691
ansible.cfg.default
Normal file
691
ansible.cfg.default
Normal file
@@ -0,0 +1,691 @@
|
||||
[defaults]
|
||||
# (boolean) By default Ansible will issue a warning when received from a task action (module or action plugin)
|
||||
# These warnings can be silenced by adjusting this setting to False.
|
||||
;action_warnings=True
|
||||
|
||||
# (list) Accept list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates.
|
||||
;cowsay_enabled_stencils=bud-frogs, bunny, cheese, daemon, default, dragon, elephant-in-snake, elephant, eyes, hellokitty, kitty, luke-koala, meow, milk, moofasa, moose, ren, sheep, small, stegosaurus, stimpy, supermilker, three-eyes, turkey, turtle, tux, udder, vader-koala, vader, www
|
||||
|
||||
# (string) Specify a custom cowsay path or swap in your cowsay implementation of choice
|
||||
;cowpath=
|
||||
|
||||
# (string) This allows you to chose a specific cowsay stencil for the banners or use 'random' to cycle through them.
|
||||
;cow_selection=default
|
||||
|
||||
# (boolean) This option forces color mode even when running without a TTY or the "nocolor" setting is True.
|
||||
;force_color=False
|
||||
|
||||
# (path) The default root path for Ansible config files on the controller.
|
||||
;home=~/.ansible
|
||||
|
||||
# (boolean) This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
|
||||
;nocolor=False
|
||||
|
||||
# (boolean) If you have cowsay installed but want to avoid the 'cows' (why????), use this.
|
||||
;nocows=False
|
||||
|
||||
# (boolean) Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
|
||||
;any_errors_fatal=False
|
||||
|
||||
# (path) The password file to use for the become plugin. --become-password-file.
|
||||
# If executable, it will be run and the resulting stdout will be used as the password.
|
||||
;become_password_file=
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Become Plugins.
|
||||
;become_plugins={{ ANSIBLE_HOME ~ "/plugins/become:/usr/share/ansible/plugins/become" }}
|
||||
|
||||
# (string) Chooses which cache plugin to use, the default 'memory' is ephemeral.
|
||||
;fact_caching=memory
|
||||
|
||||
# (string) Defines connection or path information for the cache plugin
|
||||
;fact_caching_connection=
|
||||
|
||||
# (string) Prefix to use for cache plugin files/tables
|
||||
;fact_caching_prefix=ansible_facts
|
||||
|
||||
# (integer) Expiration timeout for the cache plugin data
|
||||
;fact_caching_timeout=86400
|
||||
|
||||
# (list) List of enabled callbacks, not all callbacks need enabling, but many of those shipped with Ansible do as we don't want them activated by default.
|
||||
;callbacks_enabled=
|
||||
|
||||
# (string) When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`).
|
||||
;collections_on_ansible_version_mismatch=warning
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for collections content. Collections must be in nested *subdirectories*, not directly in these directories. For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``, and you want to add ``my.collection`` to that directory, it must be saved as ``'{{ ANSIBLE_HOME} ~ "/collections/ansible_collections/my/collection" }}'``.
|
||||
|
||||
;collections_path={{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}
|
||||
|
||||
# (boolean) A boolean to enable or disable scanning the sys.path for installed collections
|
||||
;collections_scan_sys_path=True
|
||||
|
||||
# (path) The password file to use for the connection plugin. --connection-password-file.
|
||||
;connection_password_file=
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Action Plugins.
|
||||
;action_plugins={{ ANSIBLE_HOME ~ "/plugins/action:/usr/share/ansible/plugins/action" }}
|
||||
|
||||
# (boolean) When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo) to return data that is not marked 'unsafe'.
|
||||
# By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backward compatibility, however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run through the templating engine late
|
||||
;allow_unsafe_lookups=False
|
||||
|
||||
# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting.
|
||||
;ask_pass=False
|
||||
|
||||
# (boolean) This controls whether an Ansible playbook should prompt for a vault password.
|
||||
;ask_vault_pass=False
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Cache Plugins.
|
||||
;cache_plugins={{ ANSIBLE_HOME ~ "/plugins/cache:/usr/share/ansible/plugins/cache" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Callback Plugins.
|
||||
;callback_plugins={{ ANSIBLE_HOME ~ "/plugins/callback:/usr/share/ansible/plugins/callback" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Cliconf Plugins.
|
||||
;cliconf_plugins={{ ANSIBLE_HOME ~ "/plugins/cliconf:/usr/share/ansible/plugins/cliconf" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Connection Plugins.
|
||||
;connection_plugins={{ ANSIBLE_HOME ~ "/plugins/connection:/usr/share/ansible/plugins/connection" }}
|
||||
|
||||
# (boolean) Toggles debug output in Ansible. This is *very* verbose and can hinder multiprocessing. Debug output can also include secret information despite no_log settings being enabled, which means debug mode should not be used in production.
|
||||
;debug=False
|
||||
|
||||
# (string) This indicates the command to use to spawn a shell under for Ansible's execution needs on a target. Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is.
|
||||
;executable=/bin/sh
|
||||
|
||||
# (string) This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering.
|
||||
# If not set, it will fallback to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``.
|
||||
# This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module.
|
||||
# The real action being created by the implicit task is currently ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules, by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults.
|
||||
;fact_path=
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
|
||||
;filter_plugins={{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}
|
||||
|
||||
# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host.
|
||||
# When false, the handlers will not run if a failure has occurred on a host.
|
||||
# This can also be set per play or on the command line. See Handlers and Failure for more details.
|
||||
;force_handlers=False
|
||||
|
||||
# (integer) Maximum number of forks Ansible will use to execute tasks on target hosts.
|
||||
;forks=5
|
||||
|
||||
# (string) This setting controls the default policy of fact gathering (facts discovered about remote systems).
|
||||
# This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin.
|
||||
;gathering=implicit
|
||||
|
||||
# (list) Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering. See the module documentation for specifics.
|
||||
# It does **not** apply to user defined ``ansible.builtin.setup`` tasks.
|
||||
;gather_subset=
|
||||
|
||||
# (integer) Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics.
|
||||
# It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks.
|
||||
;gather_timeout=
|
||||
|
||||
# (string) This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
|
||||
# This does not affect variables whose values are scalars (integers, strings) or arrays.
|
||||
# **WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable, leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it.
|
||||
# We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much complexity has been introduced into the data structures and plays.
|
||||
# For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars`` that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope, but the setting itself affects all sources and makes debugging even harder.
|
||||
# All playbooks and roles in the official examples repos assume the default for this setting.
|
||||
# Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables. For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
|
||||
# The Ansible project recommends you **avoid ``merge`` for new projects.**
|
||||
# It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it. New projects should **avoid 'merge'**.
|
||||
;hash_behaviour=replace
|
||||
|
||||
# (pathlist) Comma separated list of Ansible inventory sources
|
||||
;inventory=/etc/ansible/hosts
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for HttpApi Plugins.
|
||||
;httpapi_plugins={{ ANSIBLE_HOME ~ "/plugins/httpapi:/usr/share/ansible/plugins/httpapi" }}
|
||||
|
||||
# (float) This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. Higher values are more suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage might be a concern.
|
||||
# The default corresponds to the value hardcoded in Ansible <= 2.1
|
||||
;internal_poll_interval=0.001
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Inventory Plugins.
|
||||
;inventory_plugins={{ ANSIBLE_HOME ~ "/plugins/inventory:/usr/share/ansible/plugins/inventory" }}
|
||||
|
||||
# (string) This is a developer-specific feature that allows enabling additional Jinja2 extensions.
|
||||
# See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)
|
||||
;jinja2_extensions=[]
|
||||
|
||||
# (boolean) This option preserves variable types during template operations.
|
||||
;jinja2_native=False
|
||||
|
||||
# (boolean) Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
|
||||
# If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
|
||||
;keep_remote_files=False
|
||||
|
||||
# (boolean) Controls whether callback plugins are loaded when running /usr/bin/ansible. This may be used to log activity from the command line, send notifications, and so on. Callback plugins are always loaded for ``ansible-playbook``.
|
||||
;bin_ansible_callbacks=False
|
||||
|
||||
# (tmppath) Temporary directory for Ansible to use on the controller.
|
||||
;local_tmp={{ ANSIBLE_HOME ~ "/tmp" }}
|
||||
|
||||
# (list) List of logger names to filter out of the log file
|
||||
;log_filter=
|
||||
|
||||
# (path) File to which Ansible will log on the controller. When empty logging is disabled.
|
||||
;log_path=
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Lookup Plugins.
|
||||
;lookup_plugins={{ ANSIBLE_HOME ~ "/plugins/lookup:/usr/share/ansible/plugins/lookup" }}
|
||||
|
||||
# (string) Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant for those two modules.
|
||||
;ansible_managed=Ansible managed
|
||||
|
||||
# (string) This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
|
||||
;module_args=
|
||||
|
||||
# (string) Compression scheme to use when transferring Python modules to the target.
|
||||
;module_compression=ZIP_DEFLATED
|
||||
|
||||
# (string) Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``.
|
||||
;module_name=command
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Modules.
|
||||
;library={{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
|
||||
;module_utils={{ ANSIBLE_HOME ~ "/plugins/module_utils:/usr/share/ansible/plugins/module_utils" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Netconf Plugins.
|
||||
;netconf_plugins={{ ANSIBLE_HOME ~ "/plugins/netconf:/usr/share/ansible/plugins/netconf" }}
|
||||
|
||||
# (boolean) Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures.
|
||||
;no_log=False
|
||||
|
||||
# (boolean) Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable a newer style PowerShell modules from writing to the event log.
|
||||
;no_target_syslog=False
|
||||
|
||||
# (raw) What templating should return as a 'null' value. When not set it will let Jinja2 decide.
|
||||
;null_representation=
|
||||
|
||||
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how often to check back on the status of those tasks when an explicit poll interval is not supplied. The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and providing a quick turnaround when something may have completed.
|
||||
;poll_interval=15
|
||||
|
||||
# (path) Option for connections using a certificate or key file to authenticate, rather than an agent or passwords, you can set the default value here to avoid re-specifying --private-key with every invocation.
|
||||
;private_key_file=
|
||||
|
||||
# (boolean) By default, imported roles publish their variables to the play and other roles, this setting can avoid that.
|
||||
# This was introduced as a way to reset role variables to default values if a role is used more than once in a playbook.
|
||||
# Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
|
||||
;private_role_vars=False
|
||||
|
||||
# (integer) Port to use in remote connections, when blank it will use the connection plugin default.
|
||||
;remote_port=
|
||||
|
||||
# (string) Sets the login user for the target machines
|
||||
# When blank it uses the connection plugin's default, normally the user currently executing Ansible.
|
||||
;remote_user=
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Roles.
|
||||
;roles_path={{ ANSIBLE_HOME ~ "/roles:/usr/share/ansible/roles:/etc/ansible/roles" }}
|
||||
|
||||
# (string) Set the main callback used to display Ansible output. You can only have one at a time.
|
||||
# You can have many other callbacks, but just one can be in charge of stdout.
|
||||
# See :ref:`callback_plugins` for a list of available options.
|
||||
;stdout_callback=default
|
||||
|
||||
# (string) Set the default strategy used for plays.
|
||||
;strategy=linear
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Strategy Plugins.
|
||||
;strategy_plugins={{ ANSIBLE_HOME ~ "/plugins/strategy:/usr/share/ansible/plugins/strategy" }}
|
||||
|
||||
# (boolean) Toggle the use of "su" for tasks.
|
||||
;su=False
|
||||
|
||||
# (string) Syslog facility to use when Ansible logs to the remote target
|
||||
;syslog_facility=LOG_USER
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Terminal Plugins.
|
||||
;terminal_plugins={{ ANSIBLE_HOME ~ "/plugins/terminal:/usr/share/ansible/plugins/terminal" }}
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
|
||||
;test_plugins={{ ANSIBLE_HOME ~ "/plugins/test:/usr/share/ansible/plugins/test" }}
|
||||
|
||||
# (integer) This is the default timeout for connection plugins to use.
|
||||
;timeout=10
|
||||
|
||||
# (string) Can be any connection plugin available to your ansible installation.
|
||||
# There is also a (DEPRECATED) special 'smart' option, that will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions.
|
||||
;transport=ssh
|
||||
|
||||
# (boolean) When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
|
||||
# Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written.
|
||||
;error_on_undefined_vars=True
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Vars Plugins.
|
||||
;vars_plugins={{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}
|
||||
|
||||
# (string) The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.
|
||||
;vault_encrypt_identity=
|
||||
|
||||
# (string) The label to use for the default vault id label in cases where a vault id label is not provided
|
||||
;vault_identity=default
|
||||
|
||||
# (list) A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.
|
||||
;vault_identity_list=
|
||||
|
||||
# (string) If true, decrypting vaults with a vault id will only try the password from the matching vault-id
|
||||
;vault_id_match=False
|
||||
|
||||
# (path) The vault password file to use. Equivalent to --vault-password-file or --vault-id
|
||||
# If executable, it will be run and the resulting stdout will be used as the password.
|
||||
;vault_password_file=
|
||||
|
||||
# (integer) Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
|
||||
;verbosity=0
|
||||
|
||||
# (boolean) Toggle to control the showing of deprecation warnings
|
||||
;deprecation_warnings=True
|
||||
|
||||
# (boolean) Toggle to control showing warnings related to running devel
|
||||
;devel_warning=True
|
||||
|
||||
# (boolean) Normally ``ansible-playbook`` will print a header for each task that is run. These headers will contain the name: field from the task if you specified one. If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running. Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action. If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header.
|
||||
# This setting defaults to False because there is a chance that you have sensitive values in your parameters and you do not want those to be printed.
|
||||
# If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values See How do I keep secret data in my playbook? for more information.
|
||||
;display_args_to_stdout=False
|
||||
|
||||
# (boolean) Toggle to control displaying skipped task/host entries in a task in the default callback
|
||||
;display_skipped_hosts=True
|
||||
|
||||
# (string) Root docsite URL used to generate docs URLs in warning/error text; must be an absolute URL with valid scheme and trailing slash.
|
||||
;docsite_root_url=https://docs.ansible.com/ansible-core/
|
||||
|
||||
# (pathspec) Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
|
||||
;doc_fragment_plugins={{ ANSIBLE_HOME ~ "/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments" }}
|
||||
|
||||
# (string) By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
|
||||
# These warnings can be silenced by adjusting this setting to False.
|
||||
;duplicate_dict_key=warn
|
||||
|
||||
# (boolean) Whether or not to enable the task debugger, this previously was done as a strategy plugin.
|
||||
# Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
|
||||
# a task is failed on unreachable. Use the debugger keyword for more flexibility.
|
||||
;enable_task_debugger=False
|
||||
|
||||
# (boolean) Toggle to allow missing handlers to become a warning instead of an error when notifying.
|
||||
;error_on_missing_handler=True
|
||||
|
||||
# (list) Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type.
|
||||
# If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figure it out).
|
||||
# This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit).
|
||||
;facts_modules=smart
|
||||
|
||||
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host
|
||||
;host_key_checking=True
|
||||
|
||||
# (boolean) Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
|
||||
# Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
|
||||
;inject_facts_as_vars=True
|
||||
|
||||
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
|
||||
;interpreter_python=auto
|
||||
|
||||
# (boolean) If 'false', invalid attributes for a task will result in warnings instead of errors
|
||||
;invalid_task_attribute_failed=True
|
||||
|
||||
# (boolean) Toggle to control showing warnings related to running a Jinja version older than required for jinja2_native
|
||||
;jinja2_native_warning=True
|
||||
|
||||
# (boolean) By default Ansible will issue a warning when there are no hosts in the inventory.
|
||||
# These warnings can be silenced by adjusting this setting to False.
|
||||
;localhost_warning=True
|
||||
|
||||
# (int) Maximum size of files to be considered for diff display
|
||||
;max_diff_size=104448
|
||||
|
||||
# (list) List of extensions to ignore when looking for modules to load
|
||||
# This is for rejecting script and binary module fallback extensions
|
||||
;module_ignore_exts={{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}
|
||||
|
||||
# (bool) Enables whether module responses are evaluated for containing non UTF-8 data
|
||||
# Disabling this may result in unexpected behavior
|
||||
# Only ansible-core should evaluate this configuration
|
||||
;module_strict_utf8_response=True
|
||||
|
||||
# (list) TODO: write it
|
||||
;network_group_modules=eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos
|
||||
|
||||
# (boolean) Previously Ansible would only clear some of the plugin loading caches when loading new roles, this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows to return to that behaviour.
|
||||
;old_plugin_cache_clear=False
|
||||
|
||||
# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
|
||||
;playbook_dir=
|
||||
|
||||
# (string) This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
|
||||
;playbook_vars_root=top
|
||||
|
||||
# (path) A path to configuration for filtering which plugins installed on the system are allowed to be used.
|
||||
# See :ref:`plugin_filtering_config` for details of the filter file's format.
|
||||
# The default is /etc/ansible/plugin_filters.yml
|
||||
;plugin_filters_cfg=
|
||||
|
||||
# (string) Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default value of 0 does not attempt to adjust existing system-defined limits.
|
||||
;python_module_rlimit_nofile=0
|
||||
|
||||
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
|
||||
;retry_files_enabled=False
|
||||
|
||||
# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
|
||||
# This file will be overwritten after each run with the list of failed hosts from all plays.
|
||||
;retry_files_save_path=
|
||||
|
||||
# (str) This setting can be used to optimize vars_plugin usage depending on user's inventory size and play selection.
|
||||
;run_vars_plugins=demand
|
||||
|
||||
# (bool) This adds the custom stats set via the set_stats plugin to the default output
|
||||
;show_custom_stats=False
|
||||
|
||||
# (string) Action to take when a module parameter value is converted to a string (this does not affect variables). For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc. will be converted by the YAML parser unless fully quoted.
|
||||
# Valid options are 'error', 'warn', and 'ignore'.
|
||||
# Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
|
||||
;string_conversion_action=warn
|
||||
|
||||
# (boolean) Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
|
||||
# These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
|
||||
;system_warnings=True
|
||||
|
||||
# (boolean) This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True is specified.
|
||||
# True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
|
||||
;task_debugger_ignore_errors=True
|
||||
|
||||
# (integer) Set the maximum time (in seconds) that a task can run for.
|
||||
# If set to 0 (the default) there is no timeout.
|
||||
;task_timeout=0
|
||||
|
||||
# (string) Make ansible transform invalid characters in group names supplied by inventory sources.
|
||||
;force_valid_group_names=never
|
||||
|
||||
# (boolean) Toggles the use of persistence for connections.
|
||||
;use_persistent_connections=False
|
||||
|
||||
# (bool) A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group. Metadata containing unexpected fields or value types will produce a warning when this is True.
|
||||
;validate_action_group_metadata=True
|
||||
|
||||
# (list) Accept list for variable plugins that require it.
|
||||
;vars_plugins_enabled=host_group_vars
|
||||
|
||||
# (list) Allows to change the group variable precedence merge order.
|
||||
;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play
|
||||
|
||||
# (string) The salt to use for the vault encryption. If it is not provided, a random salt will be used.
|
||||
;vault_encrypt_salt=
|
||||
|
||||
# (bool) Force 'verbose' option to use stderr instead of stdout
|
||||
;verbose_to_stderr=False
|
||||
|
||||
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
|
||||
# This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the overall maximum duration the task can take will be extended by the amount specified here.
|
||||
;win_async_startup_timeout=5
|
||||
|
||||
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
|
||||
# This affects vars_files, include_vars, inventory and vars plugins among others.
|
||||
;yaml_valid_extensions=.yml, .yaml, .json
|
||||
|
||||
|
||||
[privilege_escalation]
|
||||
# (boolean) Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method
|
||||
;agnostic_become_prompt=True
|
||||
|
||||
# (boolean) This setting controls if become is skipped when remote user and become user are the same. I.E root sudo to root.
|
||||
# If executable, it will be run and the resulting stdout will be used as the password.
|
||||
;become_allow_same_user=False
|
||||
|
||||
# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login.
|
||||
;become=False
|
||||
|
||||
# (boolean) Toggle to prompt for privilege escalation password.
|
||||
;become_ask_pass=False
|
||||
|
||||
# (string) executable to use for privilege escalation, otherwise Ansible will depend on PATH
|
||||
;become_exe=
|
||||
|
||||
# (string) Flags to pass to the privilege escalation executable.
|
||||
;become_flags=
|
||||
|
||||
# (string) Privilege escalation method to use when `become` is enabled.
|
||||
;become_method=sudo
|
||||
|
||||
# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
|
||||
;become_user=root
|
||||
|
||||
|
||||
[persistent_connection]
|
||||
# (path) Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
|
||||
# If null, ansible will start with the same directory as the ansible script.
|
||||
;ansible_connection_path=
|
||||
|
||||
# (int) This controls the amount of time to wait for response from remote device before timing out persistent connection.
|
||||
;command_timeout=30
|
||||
|
||||
# (integer) This controls the retry timeout for persistent connection to connect to the local domain socket.
|
||||
;connect_retry_timeout=15
|
||||
|
||||
# (integer) This controls how long the persistent connection will remain idle before it is destroyed.
|
||||
;connect_timeout=30
|
||||
|
||||
# (path) Path to socket to be used by the connection persistence system.
|
||||
;control_path_dir={{ ANSIBLE_HOME ~ "/pc" }}
|
||||
|
||||
|
||||
[connection]
|
||||
# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
|
||||
# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer.
|
||||
# It can result in a very significant performance improvement when enabled.
|
||||
# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default.
|
||||
# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
|
||||
;pipelining=False
|
||||
|
||||
|
||||
[colors]
|
||||
# (string) Defines the color to use on 'Changed' task status
|
||||
;changed=yellow
|
||||
|
||||
# (string) Defines the default color to use for ansible-console
|
||||
;console_prompt=white
|
||||
|
||||
# (string) Defines the color to use when emitting debug messages
|
||||
;debug=dark gray
|
||||
|
||||
# (string) Defines the color to use when emitting deprecation messages
|
||||
;deprecate=purple
|
||||
|
||||
# (string) Defines the color to use when showing added lines in diffs
|
||||
;diff_add=green
|
||||
|
||||
# (string) Defines the color to use when showing diffs
|
||||
;diff_lines=cyan
|
||||
|
||||
# (string) Defines the color to use when showing removed lines in diffs
|
||||
;diff_remove=red
|
||||
|
||||
# (string) Defines the color to use when emitting error messages
|
||||
;error=red
|
||||
|
||||
# (string) Defines the color to use for highlighting
|
||||
;highlight=white
|
||||
|
||||
# (string) Defines the color to use when showing 'OK' task status
|
||||
;ok=green
|
||||
|
||||
# (string) Defines the color to use when showing 'Skipped' task status
|
||||
;skip=cyan
|
||||
|
||||
# (string) Defines the color to use on 'Unreachable' status
|
||||
;unreachable=bright red
|
||||
|
||||
# (string) Defines the color to use when emitting verbose messages. i.e those that show with '-v's.
|
||||
;verbose=blue
|
||||
|
||||
# (string) Defines the color to use when emitting warning messages
|
||||
;warn=bright purple
|
||||
|
||||
|
||||
[selinux]
|
||||
# (boolean) This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh. This is necessary when running on systems which do not have SELinux.
|
||||
;libvirt_lxc_noseclabel=False
|
||||
|
||||
# (list) Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors.
|
||||
# Data corruption may occur and writes are not always verified when a filesystem is in the list.
|
||||
;special_context_filesystems=fuse, nfs, vboxsf, ramfs, 9p, vfat
|
||||
|
||||
|
||||
[diff]
|
||||
# (bool) Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
|
||||
;always=False
|
||||
|
||||
# (integer) How many lines of context to show when displaying the differences between files.
|
||||
;context=3
|
||||
|
||||
|
||||
[galaxy]
|
||||
# (path) The directory that stores cached responses from a Galaxy server.
|
||||
# This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
|
||||
# Cache files inside this dir will be ignored if they are world writable.
|
||||
;cache_dir={{ ANSIBLE_HOME ~ "/galaxy_cache" }}
|
||||
|
||||
# (bool) whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`
|
||||
;collections_path_warning=True
|
||||
|
||||
# (path) Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
|
||||
;collection_skeleton=
|
||||
|
||||
# (list) patterns of files to ignore inside a Galaxy collection skeleton directory
|
||||
;collection_skeleton_ignore=^.git$, ^.*/.git_keep$
|
||||
|
||||
# (bool) Disable GPG signature verification during collection installation.
|
||||
;disable_gpg_verify=False
|
||||
|
||||
# (bool) Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when outputting the stdout to a file.
|
||||
# This config option controls whether the display wheel is shown or not.
|
||||
# The default is to show the display wheel if stdout has a tty.
|
||||
;display_progress=
|
||||
|
||||
# (path) Configure the keyring used for GPG signature verification during collection installation and verification.
|
||||
;gpg_keyring=
|
||||
|
||||
# (boolean) If set to yes, ansible-galaxy will not validate TLS certificates. This can be useful for testing against a server with a self-signed certificate.
|
||||
;ignore_certs=
|
||||
|
||||
# (list) A list of GPG status codes to ignore during GPG signature verification. See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions.
|
||||
# If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`, signature verification will fail even if all error codes are ignored.
|
||||
;ignore_signature_status_codes=
|
||||
|
||||
# (str) The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
|
||||
# This should be a positive integer or all to indicate all signatures must successfully validate the collection.
|
||||
# Prepend + to the value to fail if no valid signatures are found for the collection.
|
||||
;required_valid_signature_count=1
|
||||
|
||||
# (path) Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
|
||||
;role_skeleton=
|
||||
|
||||
# (list) patterns of files to ignore inside a Galaxy role or collection skeleton directory
|
||||
;role_skeleton_ignore=^.git$, ^.*/.git_keep$
|
||||
|
||||
# (string) URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source.
|
||||
;server=https://galaxy.ansible.com
|
||||
|
||||
# (list) A list of Galaxy servers to use when installing a collection.
|
||||
# The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
|
||||
# See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.
|
||||
# The order of servers in this list is used to as the order in which a collection is resolved.
|
||||
# Setting this config option will ignore the :ref:`galaxy_server` config option.
|
||||
;server_list=
|
||||
|
||||
# (int) The default timeout for Galaxy API calls. Galaxy servers that don't configure a specific timeout will fall back to this value.
|
||||
;server_timeout=60
|
||||
|
||||
# (path) Local path to galaxy access token file
|
||||
;token_path={{ ANSIBLE_HOME ~ "/galaxy_token" }}
|
||||
|
||||
|
||||
[inventory]
|
||||
# (string) This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it
|
||||
;host_pattern_mismatch=warning
|
||||
|
||||
# (boolean) If 'true', it is a fatal error when any given inventory source cannot be successfully parsed by any available inventory plugin; otherwise, this situation only attracts a warning.
|
||||
|
||||
;any_unparsed_is_failed=False
|
||||
|
||||
# (bool) Toggle to turn on inventory caching.
|
||||
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
|
||||
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
|
||||
# This message will be removed in 2.16.
|
||||
;cache=False
|
||||
|
||||
# (string) The plugin for caching inventory.
|
||||
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
|
||||
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
|
||||
# This message will be removed in 2.16.
|
||||
;cache_plugin=
|
||||
|
||||
# (string) The inventory cache connection.
|
||||
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
|
||||
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
|
||||
# This message will be removed in 2.16.
|
||||
;cache_connection=
|
||||
|
||||
# (string) The table prefix for the cache plugin.
|
||||
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
|
||||
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
|
||||
# This message will be removed in 2.16.
|
||||
;cache_prefix=ansible_inventory_
|
||||
|
||||
# (string) Expiration timeout for the inventory cache plugin data.
|
||||
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
|
||||
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
|
||||
# This message will be removed in 2.16.
|
||||
;cache_timeout=3600
|
||||
|
||||
# (list) List of enabled inventory plugins, it also determines the order in which they are used.
|
||||
;enable_plugins=host_list, script, auto, yaml, ini, toml
|
||||
|
||||
# (bool) Controls if ansible-inventory will accurately reflect Ansible's view into inventory or its optimized for exporting.
|
||||
;export=False
|
||||
|
||||
# (list) List of extensions to ignore when using a directory as an inventory source
|
||||
;ignore_extensions={{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}
|
||||
|
||||
# (list) List of patterns to ignore when using a directory as an inventory source
|
||||
;ignore_patterns=
|
||||
|
||||
# (bool) If 'true' it is a fatal error if every single potential inventory source fails to parse, otherwise this situation will only attract a warning.
|
||||
|
||||
;unparsed_is_failed=False
|
||||
|
||||
# (boolean) By default Ansible will issue a warning when no inventory was loaded and notes that it will use an implicit localhost-only inventory.
|
||||
# These warnings can be silenced by adjusting this setting to False.
|
||||
;inventory_unparsed_warning=True
|
||||
|
||||
|
||||
[netconf_connection]
|
||||
# (string) This variable is used to enable bastion/jump host with netconf connection. If set to True the bastion/jump host ssh settings should be present in ~/.ssh/config file, alternatively it can be set to custom ssh configuration file path to read the bastion/jump host settings.
|
||||
;ssh_config=
|
||||
|
||||
|
||||
[paramiko_connection]
|
||||
# (boolean) TODO: write it
|
||||
;host_key_auto_add=False
|
||||
|
||||
# (boolean) TODO: write it
|
||||
;look_for_keys=True
|
||||
|
||||
|
||||
[jinja2]
|
||||
# (list) This list of filters avoids 'type conversion' when templating variables
|
||||
# Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
|
||||
;dont_type_filters=string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json
|
||||
|
||||
|
||||
[tags]
|
||||
# (list) default list of tags to run in your plays, Skip Tags has precedence.
|
||||
;run=
|
||||
|
||||
# (list) default list of tags to skip in your plays, has precedence over Run Tags
|
||||
;skip=
|
||||
|
||||
29
aya01.yml
29
aya01.yml
@@ -1,29 +0,0 @@
|
||||
---
|
||||
- name: Set up Servers
|
||||
hosts: aya01
|
||||
gather_facts: yes
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: samba
|
||||
tags:
|
||||
- samba
|
||||
# - role: power_management
|
||||
# tags:
|
||||
# - power_management
|
||||
- role: backblaze
|
||||
tags:
|
||||
- backblaze
|
||||
- role: node_exporter
|
||||
tags:
|
||||
- node_exporter
|
||||
- role: snmp_exporter
|
||||
tags:
|
||||
- snmp_exporter
|
||||
- role: smart_exporter
|
||||
tags:
|
||||
- smart_exporter
|
||||
- role: docker
|
||||
tags:
|
||||
- docker
|
||||
14
group_vars/all/secrets.yml
Normal file
14
group_vars/all/secrets.yml
Normal file
@@ -0,0 +1,14 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
65646664663537386235383334613536393336623332363437376337323235636335363165366632
|
||||
3433623633393731373932306433643663333133393734370a353261353164353335356264643234
|
||||
65376132336534306465376435303764616136646633303166336136373263346436353235343065
|
||||
6238353863333239330a303131623262353563323864323536313036356237653936326361366565
|
||||
62616566396266363535653062636537383061363438303138333237643939323162336465326363
|
||||
64323830393839386233303634326562386537373766646461376238663963376463623130303363
|
||||
65366638666132393538336361663639303831333232336632616338396539353565663239373265
|
||||
38323036343733303131383439323738623263383736303935636339303564343662633437626233
|
||||
33303564373963646465306137346161656166366266663766356362636362643430393232646635
|
||||
38363764386538613166306464336532623464343565396431643738353434313838633763663861
|
||||
35616365383831643434316436313035366131663131373064663464393031623132366137303333
|
||||
62333561373465323664303539353966663763613365373633373761343966656166363265313134
|
||||
6163
|
||||
@@ -1,545 +1,20 @@
|
||||
#
|
||||
# Essential
|
||||
#
|
||||
|
||||
root: root
|
||||
user: tudattr
|
||||
timezone: Europe/Berlin
|
||||
rclone_config: "/root/.config/rclone/"
|
||||
puid: "1000"
|
||||
pgid: "1000"
|
||||
pk_path: "/mnt/veracrypt1/genesis"
|
||||
pk_path: "/media/veracrypt1/genesis"
|
||||
pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKqc9fnzfCz8fQDFzla+D8PBhvaMmFu2aF+TYkkZRxl9 tuan@genesis-2022-01-20"
|
||||
|
||||
|
||||
local_domain: tudattr.dev
|
||||
local_subdomains: "local"
|
||||
remote_domain: tudattr.dev
|
||||
remote_subdomains: "www,plex,status,tautulli"
|
||||
backup_domain: seyshiro.de
|
||||
backup_subdomains: "hass,qbit,zm,"
|
||||
|
||||
#
|
||||
# aya01
|
||||
#
|
||||
|
||||
aya01_host: "aya01"
|
||||
aya01_ip: "192.168.20.12"
|
||||
|
||||
#
|
||||
# mii
|
||||
#
|
||||
|
||||
mii_host: "mii"
|
||||
mii_ip: "192.168.200.2"
|
||||
|
||||
#
|
||||
# naruto
|
||||
#
|
||||
naruto_host: "naruto"
|
||||
naruto_ip: "192.168.20.13"
|
||||
|
||||
#
|
||||
# pi
|
||||
#
|
||||
|
||||
pi_host: "pi"
|
||||
pi_ip: "192.168.20.11"
|
||||
|
||||
#
|
||||
# inko
|
||||
#
|
||||
|
||||
inko_host: "inko"
|
||||
inko_ip: "192.168.20.14"
|
||||
|
||||
#
|
||||
# Used to download for git releases
|
||||
#
|
||||
|
||||
go_arch_map:
|
||||
i386: '386'
|
||||
x86_64: 'amd64'
|
||||
aarch64: 'arm64'
|
||||
armv7l: 'armv7'
|
||||
armv6l: 'armv6'
|
||||
|
||||
go_arch: "{{ go_arch_map[ansible_architecture] | default(ansible_architecture) }}"
|
||||
|
||||
#
|
||||
# aya01 - Disks
|
||||
#
|
||||
|
||||
fstab_entries:
|
||||
- name: "config"
|
||||
path: "/opt"
|
||||
type: "ext4"
|
||||
uuid: "cad60133-dd84-4a2a-8db4-2881c608addf"
|
||||
- name: "media0"
|
||||
path: "/mnt/media0"
|
||||
type: "ext4"
|
||||
uuid: "c4c724ec-4fe3-4665-adf4-acd31d6b7f95"
|
||||
- name: "media1"
|
||||
path: "/mnt/media1"
|
||||
type: "ext4"
|
||||
uuid: "8d66d395-1e35-4f5a-a5a7-d181d6642ebf"
|
||||
|
||||
mergerfs_entries:
|
||||
- name: "media"
|
||||
path: "/media"
|
||||
branches:
|
||||
- "/mnt/media0"
|
||||
- "/mnt/media1"
|
||||
opts:
|
||||
- "use_ino"
|
||||
- "allow_other"
|
||||
- "cache.files=partial"
|
||||
- "dropcacheonclose=true"
|
||||
- "category.create=mfs"
|
||||
type: "fuse.mergerfs"
|
||||
public_domain: tudattr.dev
|
||||
internal_domain: seyshiro.de
|
||||
|
||||
#
|
||||
# Packages
|
||||
#
|
||||
common_packages:
|
||||
- sudo
|
||||
- git
|
||||
- iperf3
|
||||
- git
|
||||
- smartmontools
|
||||
- vim
|
||||
- curl
|
||||
- tree
|
||||
- rsync
|
||||
- systemd-timesyncd
|
||||
- neofetch
|
||||
- build-essential
|
||||
- btrfs-progs
|
||||
|
||||
#
|
||||
# Docker
|
||||
#
|
||||
docker_repo_url: https://download.docker.com/linux
|
||||
docker_apt_gpg_key: "{{ docker_repo_url }}/{{ ansible_distribution | lower }}/gpg"
|
||||
docker_apt_release_channel: stable
|
||||
docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
|
||||
docker_apt_repository: "deb [arch={{ docker_apt_arch }}] {{ docker_repo_url }}/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
|
||||
docker_network: "172.16.69.0/24"
|
||||
|
||||
docker_compose_dir: /opt/docker/compose
|
||||
docker_dir: /opt/docker/config
|
||||
docker_data_dir: /media/docker/data # only available on aya01
|
||||
|
||||
mysql_user: user
|
||||
|
||||
#
|
||||
# ZoneMinder
|
||||
#
|
||||
|
||||
zoneminder_host: "zm"
|
||||
zoneminder_port: "8081"
|
||||
zoneminder_network: "172.16.42.0/24"
|
||||
|
||||
zoneminder_root: "{{ docker_dir }}/zm"
|
||||
zoneminder_config: "{{ zoneminder_root }}/config"
|
||||
zoneminder_log: "{{ zoneminder_root}}/log"
|
||||
zoneminder_db: "{{ zoneminder_root}}/db"
|
||||
|
||||
zoneminder_data: "{{ docker_data_dir }}/zm/data"
|
||||
|
||||
#
|
||||
# Syncthing
|
||||
#
|
||||
|
||||
syncthing_host: "syncthing"
|
||||
syncthing_port: "8384"
|
||||
syncthing_data: "{{docker_data_dir}}/syncthing/"
|
||||
|
||||
#
|
||||
# Softserve
|
||||
#
|
||||
|
||||
softserve_data: "{{docker_dir}}/softserve/data"
|
||||
|
||||
#
|
||||
# cupsd
|
||||
#
|
||||
|
||||
cupsd_host: "cupsd"
|
||||
cupsd_port: "631"
|
||||
cupsd_config: "{{ docker_dir }}/cupsd/"
|
||||
|
||||
#
|
||||
# Uptime Kuma
|
||||
#
|
||||
|
||||
kuma_host: "status"
|
||||
kuma_port: "3001"
|
||||
kuma_config: "{{ docker_dir }}/kuma/"
|
||||
|
||||
#
|
||||
# Traefik
|
||||
#
|
||||
|
||||
traefik:
|
||||
host: "traefik"
|
||||
admin:
|
||||
port: "8080"
|
||||
config: "{{ docker_dir }}/traefik/etc-traefik/"
|
||||
data: "{{ docker_dir }}/traefik/var-log/"
|
||||
letsencrypt: "{{ docker_dir }}/traefik/letsencrypt/"
|
||||
user:
|
||||
web: "80"
|
||||
websecure: "443"
|
||||
|
||||
#
|
||||
# DynDns Updater
|
||||
#
|
||||
|
||||
ddns_host: "ddns"
|
||||
ddns_port: "8000"
|
||||
ddns_data: "{{ docker_dir }}/ddns-updater/data/"
|
||||
|
||||
#
|
||||
# Home Assistant
|
||||
#
|
||||
|
||||
ha_host: "hass"
|
||||
ha_port: "8123"
|
||||
ha_config: "{{ docker_dir }}/home-assistant/config/"
|
||||
|
||||
#
|
||||
# pihole
|
||||
#
|
||||
|
||||
pihole_host: "pihole"
|
||||
pihole_port: "8089"
|
||||
pihole_config: "{{ docker_dir }}/pihole/etc-pihole/"
|
||||
pihole_dnsmasq: "{{ docker_dir }}/pihole/etc-dnsmasq.d/"
|
||||
|
||||
#
|
||||
# backblaze
|
||||
#
|
||||
|
||||
# Directories that will be backupped to backblaze
|
||||
# MOVED TO HOSTVARS
|
||||
# backblaze_paths:
|
||||
# aya01:
|
||||
# - "{{ docker_compose_dir }}"
|
||||
# - "{{ docker_dir }}"
|
||||
# pi:
|
||||
# - "{{ docker_compose_dir }}"
|
||||
# - "{{ docker_dir }}"
|
||||
|
||||
#
|
||||
# samba
|
||||
#
|
||||
samba:
|
||||
dependencies:
|
||||
- "samba"
|
||||
- "smbclient"
|
||||
- "cifs-utils"
|
||||
user: "smbuser"
|
||||
group: "smbshare"
|
||||
config: "templates/smb.conf"
|
||||
shares:
|
||||
media:
|
||||
name: "media"
|
||||
path: "/media"
|
||||
paperless:
|
||||
name: "paperless"
|
||||
path: "{{ paperless.data.consume }}"
|
||||
|
||||
|
||||
#
|
||||
# netdata
|
||||
#
|
||||
|
||||
netdata_port: "19999"
|
||||
netdata_config: "{{ docker_dir }}/netdata/"
|
||||
netdata_lib: "{{ docker_data_dir }}/netdata/lib/"
|
||||
netdata_cache: "{{ docker_data_dir }}/netdata/cache"
|
||||
|
||||
#
|
||||
# Plex
|
||||
#
|
||||
|
||||
plex_host: "plex"
|
||||
# plex_ip: "172.16.69.12"
|
||||
plex_port: "32400"
|
||||
plex_config: "{{docker_data_dir}}/{{ plex_host }}/config"
|
||||
plex_tv: "/media/series"
|
||||
plex_movies: "/media/movies"
|
||||
plex_music: "/media/songs"
|
||||
|
||||
#
|
||||
# WireGuard
|
||||
#
|
||||
|
||||
wg_config: "templates/wg0.conf"
|
||||
wg_remote_config: "/etc/wireguard/wg0.conf"
|
||||
wg_service: "wg-quick@wg0.service"
|
||||
wg_deps: "wireguard"
|
||||
|
||||
wg_ip: "192.168.200.2"
|
||||
wg_pubkey: "+LaPESyBF6Sb1lqkk4UcestFpXNaKYyyX99tkqwLQhU="
|
||||
wg_endpoint: "{{ local_subdomains }}.{{ local_domain }}:51820"
|
||||
wg_allowed_ips: "192.168.20.0/24,192.168.200.1/32"
|
||||
wg_dns: "{{ aya01_ip }},{{ pi_ip }},1.1.1.1"
|
||||
|
||||
arr_downloads: "{{ docker_data_dir }}/arr_downloads"
|
||||
#
|
||||
# Sonarr
|
||||
#
|
||||
|
||||
sonarr_port: "8989"
|
||||
sonarr_host: "sonarr"
|
||||
sonarr_config: "{{ docker_dir }}/{{ sonarr_host }}/config"
|
||||
sonarr_media: "{{ plex_tv }}"
|
||||
sonarr_downloads: "{{ arr_downloads }}/{{ sonarr_host }}"
|
||||
|
||||
#
|
||||
# Radarr
|
||||
#
|
||||
|
||||
radarr_port: "7878"
|
||||
radarr_host: "radarr"
|
||||
radarr_config: "{{ docker_dir }}/{{ radarr_host }}/config"
|
||||
radarr_media: "{{ plex_movies }}"
|
||||
radarr_downloads: "{{ arr_downloads }}/{{ radarr_host }}"
|
||||
|
||||
#
|
||||
# Lidarr
|
||||
#
|
||||
|
||||
lidarr_port: "8686"
|
||||
lidarr_host: "lidarr"
|
||||
lidarr_config: "{{ docker_dir }}/{{ lidarr_host }}/config"
|
||||
lidarr_media: "{{ plex_music }}"
|
||||
lidarr_downloads: "{{ arr_downloads }}/{{ lidarr_host }}"
|
||||
|
||||
#
|
||||
# Prowlarr
|
||||
#
|
||||
|
||||
prowlarr_port: "9696"
|
||||
prowlarr_host: "prowlarr"
|
||||
prowlarr_config: "{{ docker_dir }}/{{ prowlarr_host }}/config"
|
||||
|
||||
#
|
||||
# bin
|
||||
#
|
||||
|
||||
bin_port: "6162"
|
||||
bin_host: "bin"
|
||||
bin_upload: "{{ docker_data_dir }}/{{bin_host}}/upload"
|
||||
|
||||
#
|
||||
# qbittorrentvpn
|
||||
#
|
||||
|
||||
qbit_port: "8082"
|
||||
qbit_host: "qbit"
|
||||
qbit_config: "templates/aya01/qbittorrentvpn/config"
|
||||
|
||||
qbit_remote_config: "{{ docker_dir }}/{{ qbit_host }}/config"
|
||||
qbit_downloads: "{{ arr_downloads }}"
|
||||
qbit_type: "openvpn"
|
||||
qbit_ssl: "no"
|
||||
qbit_lan: "192.168.20.0/24, 192.168.30.0/24, {{ docker_network }}"
|
||||
qbit_dns: "{{ aya01_ip }}, {{ pi_ip }}, 1.1.1.1"
|
||||
|
||||
#
|
||||
# qbittorrentvpn - torrentleech
|
||||
#
|
||||
|
||||
torrentleech_port: "8083"
|
||||
torrentleech_host: "torrentleech"
|
||||
torrentleech_remote_config: "{{ docker_dir }}/{{ torrentleech_host }}/config"
|
||||
|
||||
#
|
||||
# Home Assistant
|
||||
#
|
||||
|
||||
hass_port: ""
|
||||
hass_host: "hass"
|
||||
|
||||
#
|
||||
# Tautulli
|
||||
#
|
||||
|
||||
tautulli_port: "8181"
|
||||
tautulli_host: "tautulli"
|
||||
tautulli_config: "{{ docker_dir }}/{{ tautulli_host }}/config"
|
||||
|
||||
#
|
||||
# Code Server
|
||||
#
|
||||
|
||||
code_port: "8443"
|
||||
code_host: "code"
|
||||
code_config: "{{ docker_dir }}/{{ code_host }}/config"
|
||||
|
||||
#
|
||||
# GlueTun
|
||||
#
|
||||
|
||||
gluetun_port: ""
|
||||
gluetun_host: "gluetun"
|
||||
gluetun_country: "Hungary"
|
||||
gluetun_config: "{{ docker_dir }}/{{ gluetun_host }}/config"
|
||||
|
||||
#
|
||||
# NodeExporter
|
||||
#
|
||||
|
||||
node_exporter:
|
||||
port: 9100
|
||||
host: 'node'
|
||||
version: 'latest'
|
||||
serve: 'localhost'
|
||||
options: ''
|
||||
bin_path: /usr/local/bin/node_exporter
|
||||
|
||||
#
|
||||
# Prometheus
|
||||
#
|
||||
|
||||
prometheus_puid: "65534"
|
||||
prometheus_pgid: "65534"
|
||||
prometheus_host: "prometheus"
|
||||
prometheus_data: "{{docker_data_dir}}/prometheus/"
|
||||
prometheus_config: "{{docker_dir}}/prometheus/"
|
||||
prometheus_port: "9090"
|
||||
|
||||
#
|
||||
# Grafana
|
||||
#
|
||||
|
||||
grafana_host: "grafana"
|
||||
grafana_port: "3000"
|
||||
grafana_data: "{{docker_data_dir}}/grafana/"
|
||||
grafana_config: "{{docker_dir}}/grafana/config/"
|
||||
grafana_logs: "{{docker_dir}}/grafana/logs/"
|
||||
grafana_puid: "472"
|
||||
grafana_pgid: "472"
|
||||
|
||||
#
|
||||
# SNMP Exporter
|
||||
#
|
||||
|
||||
snmp_exporter_port: "9116"
|
||||
snmp_exporter_target: "192.168.20.1"
|
||||
snmp_exporter_config: "{{ docker_dir }}/snmp_exporter/"
|
||||
snmp_exporter_host: "snmp_exporter"
|
||||
|
||||
#
|
||||
# SMART Exporter
|
||||
#
|
||||
|
||||
smart_exporter:
|
||||
port: 9633
|
||||
version: 'latest'
|
||||
options: '--web.listen-address=9633'
|
||||
bin_path: /usr/local/bin/smart_exporter
|
||||
|
||||
#
|
||||
# Stirling-pdf
|
||||
#
|
||||
|
||||
stirling:
|
||||
host: "stirling"
|
||||
dns: "pdf"
|
||||
port: 8084
|
||||
|
||||
#
|
||||
# nginx proxy manager
|
||||
#
|
||||
|
||||
nginx:
|
||||
host: "nginx"
|
||||
endpoints:
|
||||
http: 80
|
||||
https: 443
|
||||
admin: 8080
|
||||
paths:
|
||||
letsencrypt: "{{docker_dir}}/nginx/letsencrypt"
|
||||
data: "{{docker_dir}}/nginx/data"
|
||||
|
||||
#
|
||||
# Jellyfin
|
||||
#
|
||||
|
||||
jellyfin:
|
||||
host: "jellyfin"
|
||||
port: "8096"
|
||||
config: "{{docker_dir}}/jellyfin/config"
|
||||
cache: "{{docker_dir}}/jellyfin/cache"
|
||||
media:
|
||||
tv: "{{ plex_tv }}"
|
||||
movies: "{{ plex_movies }}"
|
||||
music: "{{ plex_music }}"
|
||||
|
||||
#
|
||||
# paperless-ngx
|
||||
#
|
||||
|
||||
paperless:
|
||||
host: "paperless"
|
||||
port: "8000"
|
||||
data:
|
||||
data: "{{ docker_dir }}/paperless/data/data"
|
||||
media: "{{ docker_dir }}/paperless/data/media"
|
||||
export: "{{ docker_dir }}/paperless/data/export"
|
||||
consume: "{{ docker_dir }}/paperless/data/consume"
|
||||
db:
|
||||
host: "paperless-sqlite"
|
||||
db: "paperless"
|
||||
user: "paperless"
|
||||
password: "{{ host.paperless.db.password }}"
|
||||
data: "{{ docker_dir }}/paperless/db/data"
|
||||
redis:
|
||||
host: "paperless-redis"
|
||||
data: "{{ docker_dir }}/paperless/redis/data"
|
||||
|
||||
#
|
||||
# Homarr
|
||||
#
|
||||
|
||||
homarr:
|
||||
host: "homarr"
|
||||
volumes:
|
||||
configs: "{{docker_dir}}/homarr/configs"
|
||||
data: "{{ docker_data_dir }}/homarr/data/"
|
||||
icons: "{{docker_dir}}/homarr/icons"
|
||||
|
||||
#
|
||||
# gitea
|
||||
#
|
||||
|
||||
gitea:
|
||||
host: "git"
|
||||
url: "https://git.tudattr.dev"
|
||||
volumes:
|
||||
data: "{{ docker_data_dir }}/gitea/data"
|
||||
config: "{{ docker_dir }}/gitea/config"
|
||||
ports:
|
||||
http: "3000"
|
||||
ssh: "2222"
|
||||
runner:
|
||||
host: "gitea-runner-{{ host.hostname }}"
|
||||
token: "{{ host.gitea.runner.token }}"
|
||||
name: "{{ host.hostname }}"
|
||||
volumes:
|
||||
data: "{{ docker_data_dir }}/gitea/runner/data/"
|
||||
config: "{{ docker_dir }}/gitea/runner/config/"
|
||||
config_file: "{{ docker_dir }}/gitea/runner/config/config.yml"
|
||||
|
||||
#
|
||||
# Jellyseer
|
||||
#
|
||||
|
||||
jellyseer:
|
||||
host: "jellyseer"
|
||||
ports:
|
||||
http: "5055"
|
||||
volumes:
|
||||
config: "{{ docker_dir }}/jellyseer/config"
|
||||
arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
|
||||
|
||||
502
group_vars/docker/docker.yml
Normal file
502
group_vars/docker/docker.yml
Normal file
@@ -0,0 +1,502 @@
|
||||
docker:
|
||||
url: "https://download.docker.com/linux"
|
||||
apt_release_channel: "stable"
|
||||
directories:
|
||||
local: "/opt/local/"
|
||||
config: "/opt/docker/config/"
|
||||
compose: "/opt/docker/compose/"
|
||||
|
||||
services:
|
||||
- name: status
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: kuma
|
||||
image: louislam/uptime-kuma:1.23.16
|
||||
volumes:
|
||||
- name: "Data"
|
||||
internal: /app/data
|
||||
external: "{{ docker.directories.local }}/kuma/"
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 3001
|
||||
external: "{{ services_external_http.kuma }}"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- name: plex
|
||||
vm:
|
||||
- docker-host10
|
||||
container_name: plex
|
||||
image: lscr.io/linuxserver/plex:1.41.5
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/plex/config/"
|
||||
- name: "TV Series"
|
||||
internal: /tv:ro
|
||||
external: /media/series
|
||||
- name: "Movies"
|
||||
internal: /movies:ro
|
||||
external: /media/movies
|
||||
- name: "Music"
|
||||
internal: /music:ro
|
||||
external: /media/songs
|
||||
devices:
|
||||
- name: "Graphics Card"
|
||||
internal: /dev/dri
|
||||
external: /dev/dri
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 32400
|
||||
external: "{{ services_external_http.plex }}"
|
||||
- name: ""
|
||||
internal: 1900
|
||||
external: 1900
|
||||
- name: ""
|
||||
internal: 3005
|
||||
external: 3005
|
||||
- name: ""
|
||||
internal: 5353
|
||||
external: 5353
|
||||
- name: ""
|
||||
internal: 32410
|
||||
external: 32410
|
||||
- name: ""
|
||||
internal: 8324
|
||||
external: 8324
|
||||
- name: ""
|
||||
internal: 32412
|
||||
external: 32412
|
||||
- name: ""
|
||||
internal: 32469
|
||||
external: 32469
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- VERSION=docker
|
||||
- name: jellyfin
|
||||
vm:
|
||||
- docker-host01
|
||||
container_name: jellyfin
|
||||
image: jellyfin/jellyfin:10.10
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/jellyfin/config"
|
||||
- name: "Cache"
|
||||
internal: /cache
|
||||
external: "{{ docker.directories.config }}/jellyfin/cache"
|
||||
- name: "Tv Series"
|
||||
internal: /tv:ro
|
||||
external: /media/series
|
||||
- name: "Music"
|
||||
internal: /movies:ro
|
||||
external: /media/movies
|
||||
- name: "Music"
|
||||
internal: /music:ro
|
||||
external: /media/songs
|
||||
devices:
|
||||
- name: "Graphics Card"
|
||||
internal: /dev/dri
|
||||
external: /dev/dri
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8096
|
||||
external: "{{ services_external_http.jellyfin }}"
|
||||
environment:
|
||||
- name: hass
|
||||
vm:
|
||||
- docker-host01
|
||||
container_name: homeassistant
|
||||
image: "ghcr.io/home-assistant/home-assistant:stable"
|
||||
privileged: true
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config/
|
||||
external: "{{ docker.directories.local }}/home-assistant/config/"
|
||||
- name: "Local Time"
|
||||
internal: /etc/localtime:ro
|
||||
external: /etc/localtime
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8123
|
||||
external: "{{ services_external_http.hass }}"
|
||||
- name: ""
|
||||
internal: 4357
|
||||
external: 4357
|
||||
- name: ""
|
||||
internal: 5683
|
||||
external: 5683
|
||||
- name: ""
|
||||
internal: 5683
|
||||
external: 5683
|
||||
- name: ddns
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: ddns-updater
|
||||
image: qmcgaw/ddns-updater:2
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /updater/data/
|
||||
external: "{{ docker.directories.local }}/ddns-updater/data/"
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8000
|
||||
external: "{{ services_external_http.ddns }}"
|
||||
- name: sonarr
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: sonarr
|
||||
image: linuxserver/sonarr:4.0.14
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/sonarr/config"
|
||||
- name: "Tv Series"
|
||||
internal: /tv
|
||||
external: /media/series
|
||||
- name: "Torrent Downloads"
|
||||
internal: /downloads
|
||||
external: /media/docker/data/arr_downloads/sonarr
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8989
|
||||
external: "{{ services_external_http.sonarr }}"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- name: radarr
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: radarr
|
||||
image: linuxserver/radarr:5.21.1
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/radarr/config"
|
||||
- name: "Movies"
|
||||
internal: /movies
|
||||
external: /media/movies
|
||||
- name: "Torrent Downloads"
|
||||
internal: /downloads
|
||||
external: /media/docker/data/arr_downloads/radarr
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 7878
|
||||
external: "{{ services_external_http.radarr }}"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- name: lidarr
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: lidarr
|
||||
image: linuxserver/lidarr:2.10.3
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/lidarr/config"
|
||||
- name: "Music"
|
||||
internal: /music
|
||||
external: /media/songs
|
||||
- name: "Torrent Downloads"
|
||||
internal: /downloads
|
||||
external: /media/docker/data/arr_downloads/lidarr
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8686
|
||||
external: "{{ services_external_http.lidarr }}"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- name: prowlarr
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: prowlarr
|
||||
image: linuxserver/prowlarr:1.32.2
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/prowlarr/config"
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 9696
|
||||
external: "{{ services_external_http.prowlarr }}"
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- name: paperless
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: paperless
|
||||
image: ghcr.io/paperless-ngx/paperless-ngx:2.14
|
||||
depends_on:
|
||||
- paperless-postgres
|
||||
- paperless-redis
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /usr/src/paperless/data
|
||||
external: "{{ docker.directories.local }}/paperless/data/data"
|
||||
- name: "Media"
|
||||
internal: /usr/src/paperless/media
|
||||
external: "{{ docker.directories.local }}/paperless/data/media"
|
||||
- name: "Document Export"
|
||||
internal: /usr/src/paperless/export
|
||||
external: "{{ docker.directories.local }}/paperless/data/export"
|
||||
- name: "Document Consume"
|
||||
internal: /usr/src/paperless/consume
|
||||
external: "{{ docker.directories.local }}/paperless/data/consume"
|
||||
environment:
|
||||
- "PAPERLESS_REDIS=redis://paperless-redis:6379"
|
||||
- "PAPERLESS_DBHOST=paperless-postgres"
|
||||
- "PAPERLESS_DBUSER=paperless"
|
||||
- "PAPERLESS_DBPASS={{ vault.docker.paperless.dbpass }}"
|
||||
- "USERMAP_UID=1000"
|
||||
- "USERMAP_GID=1000"
|
||||
- "PAPERLESS_URL=https://paperless.{{ domain }}"
|
||||
- "PAPERLESS_TIME_ZONE=Europe/Berlin"
|
||||
- "PAPERLESS_OCR_LANGUAGE=deu"
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8000
|
||||
external: "{{ services_external_http.paperless }}"
|
||||
sub_service:
|
||||
- name: postgres
|
||||
version: 15
|
||||
username: paperless
|
||||
password: "{{ vault.docker.paperless.dbpass }}"
|
||||
- name: redis
|
||||
version: 7
|
||||
- name: pdf
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: stirling
|
||||
image: frooodle/s-pdf:0.45.0
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8080
|
||||
external: "{{ services_external_http.pdf }}"
|
||||
- name: git
|
||||
vm:
|
||||
- docker-host01
|
||||
container_name: gitea
|
||||
image: gitea/gitea:1.23-rootless
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /etc/gitea
|
||||
external: "{{ docker.directories.local }}/gitea/config"
|
||||
- name: "Data"
|
||||
internal: /var/lib/gitea
|
||||
external: "{{ docker.directories.local }}/gitea/data"
|
||||
- name: "Time Zone"
|
||||
internal: /etc/timezone:ro
|
||||
external: /etc/timezone
|
||||
- name: "Local Time"
|
||||
internal: /etc/localtime:ro
|
||||
external: /etc/localtime
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 3000
|
||||
external: "{{ services_external_http.git }}"
|
||||
- name: "ssh"
|
||||
internal: 2222
|
||||
external: 2222
|
||||
environment:
|
||||
- USER_UID=1000
|
||||
- USER_GID=1000
|
||||
- name: changedetection
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: changedetection
|
||||
image: dgtlmoon/changedetection.io:0.49
|
||||
healthcheck: curl
|
||||
volumes:
|
||||
- name: "Data"
|
||||
internal: /datastore
|
||||
external: "{{ docker.directories.local }}/changedetection/data/"
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 5000
|
||||
external: "{{ services_external_http.changedetection }}"
|
||||
- name: gluetun
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: gluetun
|
||||
image: qmcgaw/gluetun:v3.40
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
devices:
|
||||
- name: "Tunnel"
|
||||
internal: /dev/net/tun
|
||||
external: /dev/net/tun
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /gluetun
|
||||
external: "{{ docker.directories.local }}/gluetun/config"
|
||||
ports:
|
||||
- name: "Qbit Client"
|
||||
internal: 8082
|
||||
external: 8082
|
||||
- name: "Torrentleech Client"
|
||||
internal: 8083
|
||||
external: 8083
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- VPN_SERVICE_PROVIDER=protonvpn
|
||||
- UPDATER_VPN_SERVICE_PROVIDERS=protonvpn
|
||||
- UPDATER_PERIOD=24h
|
||||
- "SERVER_COUNTRIES={{ vault.docker.proton.country }}"
|
||||
- "OPENVPN_USER={{ vault.docker.proton.openvpn_user }}"
|
||||
- "OPENVPN_PASSWORD={{ vault.docker.proton.openvpn_password }}"
|
||||
- name: torrentleech
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: torrentleech
|
||||
image: qbittorrentofficial/qbittorrent-nox
|
||||
depends_on:
|
||||
- gluetun
|
||||
network_mode: "container:gluetun"
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/torrentleech/config"
|
||||
- name: "Downloads"
|
||||
internal: /downloads
|
||||
external: /media/docker/data/arr_downloads
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: proxy_only
|
||||
external: 8083
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- QBT_EULA="accept"
|
||||
- QBT_WEBUI_PORT="8083"
|
||||
- name: qbit
|
||||
vm:
|
||||
- docker-host12
|
||||
container_name: qbit
|
||||
image: qbittorrentofficial/qbittorrent-nox:5.0.4-1
|
||||
depends_on:
|
||||
- gluetun
|
||||
network_mode: "container:gluetun"
|
||||
volumes:
|
||||
- name: "Configuration"
|
||||
internal: /config
|
||||
external: "{{ docker.directories.local }}/qbit/config"
|
||||
- name: "Downloads"
|
||||
internal: /downloads
|
||||
external: /media/docker/data/arr_downloads
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: proxy_only
|
||||
external: 8082
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Europe/Berlin
|
||||
- QBT_EULA="accept"
|
||||
- QBT_WEBUI_PORT="8082"
|
||||
- name: cadvisor
|
||||
vm:
|
||||
- docker-host12
|
||||
- docker-host10
|
||||
- docker-host01
|
||||
container_name: cadvisor
|
||||
image: gcr.io/cadvisor/cadvisor:v0.52.1
|
||||
ports:
|
||||
- name: ""
|
||||
internal: 8080
|
||||
external: 8081
|
||||
volumes:
|
||||
- name: "Root"
|
||||
internal: /rootfs:ro
|
||||
external: /
|
||||
- name: "Run"
|
||||
internal: /var/run:rw
|
||||
external: /var/run
|
||||
- name: "System"
|
||||
internal: /sys:ro
|
||||
external: /sys
|
||||
- name: "Docker"
|
||||
internal: /var/lib/docker:ro
|
||||
external: /var/lib/docker
|
||||
- name: karakeep
|
||||
vm:
|
||||
- docker-host01
|
||||
container_name: karakeep
|
||||
image: ghcr.io/karakeep-app/karakeep:0.23.2
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 3000
|
||||
external: "{{ services_external_http.karakeep }}"
|
||||
volumes:
|
||||
- name: "Data"
|
||||
internal: /data
|
||||
external: "{{ docker.directories.local }}/karakeep/config"
|
||||
environment:
|
||||
- MEILI_ADDR=http://karakeep-meilisearch:7700
|
||||
- BROWSER_WEB_URL=http://karakeep-chrome:9222
|
||||
- NEXTAUTH_SECRET={{ vault.docker.karakeep.nextauth_secret }}
|
||||
- MEILI_MASTER_KEY={{ vault.docker.karakeep.meili_master_key }}
|
||||
- NEXTAUTH_URL=https://karakeep.tudattr.dev/
|
||||
- OPENAI_API_KEY={{ vault.docker.karakeep.openai_key }}
|
||||
- DATA_DIR=/data
|
||||
- DISABLE_SIGNUPS=true
|
||||
sub_service:
|
||||
- name: meilisearch
|
||||
version: v1.11.1
|
||||
nextauth_secret: "{{ vault.docker.karakeep.nextauth_secret }}"
|
||||
meili_master_key: "{{ vault.docker.karakeep.meili_master_key }}"
|
||||
openai_key: "{{ vault.docker.karakeep.openai_key }}"
|
||||
- name: chrome
|
||||
version: 123
|
||||
- name: keycloak
|
||||
vm:
|
||||
- docker-host01
|
||||
container_name: keycloak
|
||||
image: quay.io/keycloak/keycloak:26.2
|
||||
depends_on:
|
||||
- keycloak-postgres
|
||||
ports:
|
||||
- name: "http"
|
||||
internal: 8080
|
||||
external: "{{ services_external_http.keycloak }}"
|
||||
volumes:
|
||||
- name: "config"
|
||||
internal: /opt/keycloak/data/import/homelab-realm.json
|
||||
external: "{{ docker.directories.local }}/keycloak/homelab-realm.json"
|
||||
- name: "config"
|
||||
internal: /opt/keycloak/data/import/master-realm.json
|
||||
external: "{{ docker.directories.local }}/keycloak/master-realm.json"
|
||||
command:
|
||||
- "start"
|
||||
- "--import-realm"
|
||||
environment:
|
||||
- KC_DB=postgres
|
||||
- KC_DB_URL=jdbc:postgresql://keycloak-postgres:5432/keycloak
|
||||
- KC_DB_USERNAME={{ keycloak_config.database.username }}
|
||||
- KC_DB_PASSWORD={{ keycloak_config.database.password }}
|
||||
- KC_HOSTNAME=keycloak.{{ internal_domain }}
|
||||
- KC_HTTP_ENABLED=true
|
||||
- KC_HTTP_RELATIVE_PATH=/
|
||||
- KC_PROXY=edge
|
||||
- KC_PROXY_HEADERS=xforwarded
|
||||
- KC_HOSTNAME_URL=https://keycloak.{{ internal_domain }}
|
||||
- KC_HOSTNAME_ADMIN_URL=https://keycloak.{{ internal_domain }}
|
||||
- KC_BOOTSTRAP_ADMIN_USERNAME=serviceadmin-{{ keycloak_admin_hash }}
|
||||
- KC_BOOTSTRAP_ADMIN_PASSWORD={{ vault.docker.keycloak.admin.password }}
|
||||
sub_service:
|
||||
- name: postgres
|
||||
version: 17
|
||||
username: "{{ keycloak_config.database.username }}"
|
||||
password: "{{ keycloak_config.database.password }}"
|
||||
61
group_vars/docker/keycloak.yml
Normal file
61
group_vars/docker/keycloak.yml
Normal file
@@ -0,0 +1,61 @@
|
||||
keycloak_admin_hash: "{{ vault.docker.keycloak.admin.hash }}"
|
||||
|
||||
keycloak_realms: "{{ keycloak_config.realms }}"
|
||||
|
||||
keycloak_config:
|
||||
database:
|
||||
db_name: keycloak
|
||||
username: keycloak
|
||||
password: "{{ vault.docker.keycloak.database.password }}"
|
||||
realms:
|
||||
- realm: homelab
|
||||
display_name: "Homelab Realm"
|
||||
users:
|
||||
- username: tudattr
|
||||
password: "{{ vault.docker.keycloak.user.password }}"
|
||||
realm_roles:
|
||||
- offline_access
|
||||
- uma_authorization
|
||||
client_roles:
|
||||
account:
|
||||
- view-profile
|
||||
- manage-account
|
||||
admin:
|
||||
username: "serviceadmin-{{ keycloak_admin_hash }}"
|
||||
password: "{{ vault.docker.keycloak.admin.password }}"
|
||||
realm_roles:
|
||||
- offline_access
|
||||
- uma_authorization
|
||||
- admin
|
||||
client_roles:
|
||||
realm_management:
|
||||
- realm-admin
|
||||
account:
|
||||
- view-profile
|
||||
- manage-account
|
||||
roles:
|
||||
realm:
|
||||
- name: admin
|
||||
description: "Administrator role for the homelab realm"
|
||||
default_roles:
|
||||
- offline_access
|
||||
- uma_authorization
|
||||
- realm: master
|
||||
display_name: "master"
|
||||
admin:
|
||||
username: "serviceadmin-{{ keycloak_admin_hash }}"
|
||||
password: "{{ vault.docker.keycloak.admin.password }}"
|
||||
realm_roles:
|
||||
- offline_access
|
||||
- uma_authorization
|
||||
- create-realm
|
||||
- admin
|
||||
client_roles:
|
||||
realm_management:
|
||||
- realm-admin
|
||||
account:
|
||||
- view-profile
|
||||
- manage-account
|
||||
roles:
|
||||
realm: []
|
||||
default_roles: []
|
||||
18
group_vars/docker/port_mapping.yml
Normal file
18
group_vars/docker/port_mapping.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
services_external_http:
|
||||
kuma: 3001
|
||||
plex: 32400
|
||||
jellyfin: 8096
|
||||
hass: 8123
|
||||
ddns: 8001
|
||||
sonarr: 8989
|
||||
radarr: 7878
|
||||
lidarr: 8686
|
||||
prowlarr: 9696
|
||||
paperless: 8000
|
||||
pdf: 8080
|
||||
git: 3000
|
||||
changedetection: 5000
|
||||
torrentleech: 8083
|
||||
qbit: 8082
|
||||
karakeep: 3002
|
||||
keycloak: 3003
|
||||
65
group_vars/docker/secrets.yml
Normal file
65
group_vars/docker/secrets.yml
Normal file
@@ -0,0 +1,65 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
62353938306631616432613936343031386266643837393733336533306532643332383761336462
|
||||
3566663762343161373266353236323532666562383031310a663661316264313737633732313166
|
||||
35336535353964646238393563333339646634346532633130633364343864363565353461616663
|
||||
6336343138623762320a366132383634383231316130643535313465356238343534656237626362
|
||||
38373439663730353739386636313865336262363864323633343839636434353261313432386135
|
||||
33343438663564323465373435613765306538633339303362656163636237643661623637376135
|
||||
65346465303530663161356666333062326536313135363536313237616564363838326339646162
|
||||
62323066626431376231386432333766366434326239303734353036396433333662333733373830
|
||||
66336433643032636166306332323063393333363734326333363936303033396336626135363832
|
||||
30636136656235376163613033616563663663633161643937666537333066343135326138643663
|
||||
64646638393364376466306438383337383231303637313366333638393939373739646338353036
|
||||
62303162383362393830316163303236336236363531333665353163373530323063313164656562
|
||||
33383561613530346561336166653536393137346630333262633738383838383338643761666463
|
||||
61303239636631646634373266303930343437636464326132316534616261376137396233653265
|
||||
39383137666533613739363764643162663361333465386332383964343534646537343065343833
|
||||
66643938623734643537313866316335396135613239393262613562356332663861646261373630
|
||||
34373939663239646534396638636265303438386239636439663635313665613634373832313237
|
||||
62306366633139333937646534393765663130396466346161376235656461346638323063353662
|
||||
64386466373433376133343266396537656435333831356531346531653262396330346238623431
|
||||
61303466366161336664333239663066643232623532643933373661663266366639646139666636
|
||||
62393532643535656566643862353337333533633861396164643766316637393638363662653863
|
||||
32643566333961663065383636383436666137356237643634326464636463303530306466616635
|
||||
36366365636337366335333630306237356366306535613464636463373063653861623464323764
|
||||
62336139653361376239303632326431643231346137333835356362333962613039643332373166
|
||||
32316234376431376136666161383039633035356636626664376137323630323966646161313664
|
||||
38623463376366623430663363663662303166636165646138363631643261376137336636636663
|
||||
61656631393963353066333930303932653730613431366131616233363662316139663038336538
|
||||
36383532316162356235373566313832323131326466363734613438323233353330613561383435
|
||||
39623435366236306431636232323838386462346464653561653638346338613833613133373133
|
||||
38626364643738373938336237323836646532356539643933333730353333626138646239633234
|
||||
66316563306230636139323335323665646462343861393366666462623966376431393438376134
|
||||
37376339356430316235633337376462666439643430303062656538386630613763623433646133
|
||||
65663530626533663266623861326431633137363466346634656634623166623331306636616666
|
||||
31643761343632336531356566636165363737646639326533386333646434393736643934643064
|
||||
39393039346639353439653766326138613164343030306436383461663636346534346365333265
|
||||
66653535623962653762633934646131653334363232636634303130306632383263373161363462
|
||||
35323133616665366238353535346561323834353634613730613439643536376337353234313337
|
||||
61623264616433336532383533376631396438313739616462323064613665396638333438306336
|
||||
34633338366235336131303462346665663464376334353431343363336662356335356562366532
|
||||
64366461623864633238666339346138663931363331613463333762336230313530613235303766
|
||||
34313064383461623230383730623731323533326663613565646436303230653264323061616536
|
||||
38636162356164656432626433373864326264623063343662323563366133363336313739326137
|
||||
31326164646364613865396534626533616366613565303032636637366435326336396464313232
|
||||
66393538393862616466313833326666316231393130666238636130613339663664393434613732
|
||||
65383363323138343335393636626138303561613532306131666334346631336333336639626466
|
||||
38343337346566346334383934306433366239666662346463666166643338613264636563653434
|
||||
36306338313363636665333763323135386165313939336432636339613432323736326635303162
|
||||
36656234656563376633373333633430313430333834623964653530626539333265363563376239
|
||||
33633430396338663063383338333732356532313435613737393465323431393035356136306166
|
||||
62633035653731636361396235613162643332393233326434353831613731373333326464326234
|
||||
36366166633437356336616166306164343636623962623136653861333866393039653939333037
|
||||
31343261663534356530373233336165326134613961616331316531313435386464396438363838
|
||||
31353935666566326630373336376438326366623537356536653564303066343837653030373962
|
||||
30393363336232646662663166326166386636356466616165376435623031666664373664623330
|
||||
31613030616162303732353738386434666566386138373238363732303138316533356435656662
|
||||
38636136353134303166636438663036363834663639613464376662666364386635333138353035
|
||||
39363236653336386332313930306663366130303836333664363335386331636431623036336535
|
||||
32366339386539306364343065323263366563643663623731643866346232653838333561336331
|
||||
36363030383263666137393035356331323038316239356637303665653164363739313664396235
|
||||
32366231613532323865623861636263383731303164366333303636356633323161653635393830
|
||||
38616139656264393932353332303264393038396663663236353838343432373965663561333531
|
||||
36363432323362643634623030356539396562633238653732313739616464643436666130633364
|
||||
37383764623938626332316630636630343236663338323661333933333730333630353061653061
|
||||
62656233653439353438
|
||||
8
group_vars/docker/vars.yml
Normal file
8
group_vars/docker/vars.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
caddy:
|
||||
admin_email: me+acme@tudattr.dev
|
||||
|
||||
domain: "{{ internal_domain }}"
|
||||
|
||||
netcup_api_key: "{{ vault.netcup.api_key }}"
|
||||
netcup_api_password: "{{ vault.netcup.api_password }}"
|
||||
netcup_customer_id: "{{ vault.netcup.customer_id }}"
|
||||
26
group_vars/k3s/secrets.yml
Normal file
26
group_vars/k3s/secrets.yml
Normal file
@@ -0,0 +1,26 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
66323965326561656434636164616434353663633933346332373537663136323465323461306337
|
||||
3733663066623866333534366430663761653262646662650a323938306636653965656361646330
|
||||
66313965376537643033666165366435653862663231383366636166373238666334313836313138
|
||||
6164353263323136300a653236636334643832396534623735316465623133373838353163313136
|
||||
33303331313037376336623637356633383734343338386634646335616632646366366138643539
|
||||
37303531346430323330396637316632643065346537386433663431373437376261366263306264
|
||||
63323235303632356661373463383565613764323733343839653139613766633036346234316432
|
||||
37626432333935613566386631346161623133366438343630316237363730626234336462303132
|
||||
38323132363631653432643462306133323266333637346139343961623430363436663763383234
|
||||
66343232386263646633653739343963333364386630376638396261326563333935643437646638
|
||||
63656664633838336535613963393434336264656265356238306237626361336533643363323838
|
||||
30376236613236386133383130633164306632323630383932383432353439646266386239383834
|
||||
32346431306662346166653738333138643733623739623536303639663136336533373230643533
|
||||
64323037303161306435316662653237356161393239656362383261306366336134353438326233
|
||||
62363532396336616261383735386535396363386339333962623233383534393033306662666266
|
||||
66316237616137366639333439613732666638376163373235306663323762613466363636346337
|
||||
38393762653537316134316234363066363439623164356237313566626533326332646663313838
|
||||
38383633616538353833353634376236656433383464303538613663383838633538616136313365
|
||||
64643438316638333433366137656634353039663763353734616432306465386563353665666136
|
||||
63383739323038333537396433303332343235383562376438633237663465396366643438353862
|
||||
32646637323530356432386662613366323234323639653139306665623865613666623133656465
|
||||
31636334666638623939393366663935363434613731386365303130343439376430613331663561
|
||||
30353738346138343563383738393666333761333231303366386563303165363039313263343563
|
||||
36303533353165323461376461623665313938356535363462663737643265636137613366616639
|
||||
38383761343161336462373563383338393435326331353132333336666330306638
|
||||
28
group_vars/k3s/vars.yml
Normal file
28
group_vars/k3s/vars.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
db:
|
||||
default_user:
|
||||
user: "postgres"
|
||||
name: "k3s"
|
||||
user: "k3s"
|
||||
password: "{{ vault_k3s.postgres.db.password }}"
|
||||
listen_address: "{{ k3s.db.ip }}"
|
||||
|
||||
k3s:
|
||||
net: "192.168.20.0/24"
|
||||
server:
|
||||
ips:
|
||||
- 192.168.20.21
|
||||
- 192.168.20.24
|
||||
- 192.168.20.30
|
||||
loadbalancer:
|
||||
ip: 192.168.20.22
|
||||
default_port: 6443
|
||||
db:
|
||||
ip: 192.168.20.23
|
||||
default_port: "5432"
|
||||
agent:
|
||||
ips:
|
||||
- 192.168.20.25
|
||||
- 192.168.20.26
|
||||
- 192.168.20.27
|
||||
|
||||
k3s_db_connection_string: "postgres://{{ db.user }}:{{ db.password }}@{{ k3s.db.ip }}:{{ k3s.db.default_port }}/{{ db.name }}"
|
||||
2
group_vars/proxmox/containers.yml
Normal file
2
group_vars/proxmox/containers.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
lxcs:
|
||||
- name: "test-lxc-00"
|
||||
15
group_vars/proxmox/secrets.yml
Normal file
15
group_vars/proxmox/secrets.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
35333866323538343132373761316430616539643436646637633131366232346566656438303438
|
||||
3539333661363964633834613161626134323533653737650a613832323436663739663162303066
|
||||
31333130646631306539356233346632636132346539343734393065353033613865363466646632
|
||||
6565343937666530330a326130393934326435643837323631653862313232363466643534306131
|
||||
62376132383137336230366538326364663362346137613930633161663834393835623935373164
|
||||
65623564633765653137623361376130623363613263313835366464313039613532323661363461
|
||||
37366438616566643537656639316665363339633737363539636364316335663639303364663366
|
||||
62653734343364663830633534643931656439313763366138323663373464303137323864313637
|
||||
65316135343464393031343166366338323839326631623533343931353833643232643339386231
|
||||
38623735386465383964653663346631376531376261353933346661666131353533633331353437
|
||||
63336366623333653732306130316264393865633338653238303861646535343837396232366134
|
||||
63343037636361323239376436326431623165326366383561323832323730636532623039383734
|
||||
66663139656262643038303435346666323762343661336234663131343531636161636536646465
|
||||
6530333864323262363536393562346362306161653162346132
|
||||
20
group_vars/proxmox/secrets_vm.yml
Normal file
20
group_vars/proxmox/secrets_vm.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
35616266333838306161336339353538306634373132626132643732303066303163343630333630
|
||||
6338393762616262303038373334663230383464643836370a656538393531393134616463643239
|
||||
36383330653339393362353838313639333432643535643833396535653632376336613130646663
|
||||
3532646538363137630a363731613235653935316531616430346264643837306434386333373033
|
||||
33663135653931373963343734366562386263663939383536663439383537333264666233343233
|
||||
62626162666538333435396638393338393734656131303065616534613733353335643939333765
|
||||
38326237343337363064666530303664326563633262313432343030336266373437353837346461
|
||||
63333363626164316638346635666537613963383537313965373638303732353365623166363736
|
||||
31633239646262613539646637663664313337353465636366313338303439613638653530656631
|
||||
62396536316561623736633631623336313537646138383431633538303163303261323864383538
|
||||
38626338373332653561343036323236383337343037356366626230646432646538373836303063
|
||||
61346339376561626630653562346439306561643664666437386562356535303264646338326261
|
||||
33636536663161366635666264663539653037306339316233643662643134396636636162656333
|
||||
36666139376263646130333263653335333165356462363434373439313330383331356138333431
|
||||
31633362343639376436616339656561316433346532346533336261383433366366396261366134
|
||||
35363264373335616165643665653466613434386630373232386261393464376361313131386462
|
||||
33333531336334386562356338623233313862316232356562373561633364363263306465333439
|
||||
37386631626538636365376464653837333662363361653237366161316431653266643238346336
|
||||
363863376530613036313866323965326638
|
||||
3
group_vars/proxmox/vars.yml
Normal file
3
group_vars/proxmox/vars.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
proxmox_api_user: root
|
||||
proxmox_api_host: 192.168.20.12
|
||||
proxmox_api_password: "{{ vault.pve.aya01.root.sudo }}"
|
||||
36
group_vars/proxmox/vms.yml
Normal file
36
group_vars/proxmox/vms.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
vms:
|
||||
- name: "docker-host10"
|
||||
node: "lulu"
|
||||
vmid: 410
|
||||
cores: 2
|
||||
memory: 4096 # in MiB
|
||||
net:
|
||||
net0: "virtio,bridge=vmbr0,firewall=1"
|
||||
boot_image: "{{ proxmox_cloud_init_images.debian.name }}"
|
||||
ciuser: "{{ user }}"
|
||||
sshkeys: "{{ pubkey }}"
|
||||
disk_size: 128 # in Gb
|
||||
hostpci:
|
||||
hostpci0: "0000:00:02.0"
|
||||
- name: "docker-host11"
|
||||
node: "lulu"
|
||||
vmid: 411
|
||||
cores: 2
|
||||
memory: 4096 # in MiB
|
||||
net:
|
||||
net0: "virtio,bridge=vmbr0,firewall=1"
|
||||
boot_image: "{{ proxmox_cloud_init_images.ubuntu.name }}"
|
||||
ciuser: "{{ user }}"
|
||||
sshkeys: "{{ pubkey }}"
|
||||
disk_size: 128 # in Gb
|
||||
- name: "docker-host12"
|
||||
node: "naruto01"
|
||||
vmid: 412
|
||||
cores: 4
|
||||
memory: 8192
|
||||
net:
|
||||
net0: "virtio,bridge=vmbr0,firewall=1"
|
||||
boot_image: "{{ proxmox_cloud_init_images.ubuntu.name }}"
|
||||
ciuser: "{{ user }}"
|
||||
sshkeys: "{{ pubkey }}"
|
||||
disk_size: 128 # in Gb
|
||||
@@ -1,53 +1,10 @@
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.12
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: '{{ pk_path }}'
|
||||
ansible_become_pass: '{{ vault.aya01.sudo }}'
|
||||
|
||||
host:
|
||||
hostname: "aya01"
|
||||
ip: "{{ ansible_host }}"
|
||||
backblaze:
|
||||
account: "{{ vault.aya01.backblaze.account }}"
|
||||
key: "{{ vault.aya01.backblaze.key }}"
|
||||
remote: "remote:aya01-tudattr-dev"
|
||||
password: "{{ vault.aya01.rclone.password }}"
|
||||
password2: "{{ vault.aya01.rclone.password2 }}"
|
||||
paths:
|
||||
- "{{ docker_compose_dir }}"
|
||||
- "{{ docker_dir }}"
|
||||
fstab:
|
||||
- name: "config"
|
||||
path: "/opt"
|
||||
type: "ext4"
|
||||
uuid: "cad60133-dd84-4a2a-8db4-2881c608addf"
|
||||
- name: "media0"
|
||||
path: "/mnt/media0"
|
||||
type: "ext4"
|
||||
uuid: "c4c724ec-4fe3-4665-adf4-acd31d6b7f95"
|
||||
- name: "media1"
|
||||
path: "/mnt/media1"
|
||||
type: "ext4"
|
||||
uuid: "8d66d395-1e35-4f5a-a5a7-d181d6642ebf"
|
||||
mergerfs:
|
||||
- name: "media"
|
||||
path: "/media"
|
||||
branches:
|
||||
- "/mnt/media0"
|
||||
- "/mnt/media1"
|
||||
opts:
|
||||
- "use_ino"
|
||||
- "allow_other"
|
||||
- "cache.files=partial"
|
||||
- "dropcacheonclose=true"
|
||||
- "category.create=mfs"
|
||||
type: "fuse.mergerfs"
|
||||
samba:
|
||||
password: "{{ vault.aya01.samba.password }}"
|
||||
paperless:
|
||||
db:
|
||||
password: "{{ vault.aya01.paperless.db.password }}"
|
||||
gitea:
|
||||
runner:
|
||||
token: "{{ vault.aya01.gitea.runner.token }}"
|
||||
name: "aya01"
|
||||
---
|
||||
# ansible_user: "root"
|
||||
# ansible_host: 192.168.20.12
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
# ansible_become_pass: "{{ vault.pve.aya01.root.sudo }}"
|
||||
#
|
||||
# host:
|
||||
# hostname: "aya01"
|
||||
# ip: "{{ ansible_host }}"
|
||||
|
||||
10
host_vars/docker-host00.yml
Normal file
10
host_vars/docker-host00.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
# Configure this in ~/.ssh/config*
|
||||
# ansible_user: "{{ user }}"
|
||||
# ansible_host: 192.168.20.34
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault.docker.host00.sudo }}"
|
||||
# host:
|
||||
# hostname: "docker-host00"
|
||||
# ip: "192.168.20.34"
|
||||
11
host_vars/docker-host01.yml
Normal file
11
host_vars/docker-host01.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
# Configure this in ~/.ssh/config*
|
||||
# ansible_user: "{{ user }}"
|
||||
# ansible_host: 192.168.20.35
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault.docker.host01.sudo }}"
|
||||
#
|
||||
# host:
|
||||
# hostname: "docker-host01"
|
||||
# ip: "192.168.20.35"
|
||||
10
host_vars/docker-host02.yml
Normal file
10
host_vars/docker-host02.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
# Configure this in ~/.ssh/config*
|
||||
# ansible_user: "{{ user }}"
|
||||
# ansible_host: 192.168.20.36
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault.docker.host02.sudo }}"
|
||||
# host:
|
||||
# hostname: "docker-host02"
|
||||
# ip: "192.168.20.36"
|
||||
9
host_vars/docker-lb.yml
Normal file
9
host_vars/docker-lb.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
# ansible_user: "{{ user }}"
|
||||
# ansible_host: 192.168.20.37
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault.docker.lb.sudo }}"
|
||||
# host:
|
||||
# hostname: "docker-lb"
|
||||
# ip: "192.168.20.37"
|
||||
@@ -1,10 +1,10 @@
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.14
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: '{{ pk_path }}'
|
||||
ansible_become_pass: '{{ vault.inko.sudo }}'
|
||||
|
||||
host:
|
||||
ip: "{{ ansible_host }}"
|
||||
fstab:
|
||||
mergerfs:
|
||||
---
|
||||
# ansible_user: "root"
|
||||
# ansible_host: 192.168.20.14
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
# ansible_become_pass: "{{ vault.pve.inko.root.sudo }}"
|
||||
#
|
||||
# host:
|
||||
# hostname: "inko"
|
||||
# ip: "{{ ansible_host }}"
|
||||
|
||||
10
host_vars/k3s-agent00.yml
Normal file
10
host_vars/k3s-agent00.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.25
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.agent00.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-agent00"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-agent01.yml
Normal file
10
host_vars/k3s-agent01.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.26
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.agent01.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-agent01"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-agent02.yml
Normal file
10
host_vars/k3s-agent02.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.27
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.agent02.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-agent02"
|
||||
ip: "{{ ansible_host }}"
|
||||
9
host_vars/k3s-loadbalancer.yml
Normal file
9
host_vars/k3s-loadbalancer.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.22
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.loadbalancer.sudo }}"
|
||||
host:
|
||||
hostname: "k3s-loadbalancer"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-longhorn00.yml
Normal file
10
host_vars/k3s-longhorn00.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.32
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.longhorn00.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-longhorn00"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-longhorn01.yml
Normal file
10
host_vars/k3s-longhorn01.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.33
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.longhorn01.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-longhorn01"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-longhorn02.yml
Normal file
10
host_vars/k3s-longhorn02.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.31
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.longhorn02.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-longhorn02"
|
||||
ip: "{{ ansible_host }}"
|
||||
9
host_vars/k3s-postgres.yml
Normal file
9
host_vars/k3s-postgres.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.23
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.postgres.sudo }}"
|
||||
host:
|
||||
hostname: "k3s-postgres"
|
||||
ip: "{{ ansible_host }}"
|
||||
9
host_vars/k3s-server00.yml
Normal file
9
host_vars/k3s-server00.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.21
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.server00.sudo }}"
|
||||
host:
|
||||
hostname: "k3s-server00"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-server01.yml
Normal file
10
host_vars/k3s-server01.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.24
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.server01.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-server01"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/k3s-server02.yml
Normal file
10
host_vars/k3s-server02.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.30
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
ansible_become_pass: "{{ vault_k3s.server02.sudo }}"
|
||||
|
||||
host:
|
||||
hostname: "k3s-server02"
|
||||
ip: "{{ ansible_host }}"
|
||||
10
host_vars/lulu.yml
Normal file
10
host_vars/lulu.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
# ansible_user: "root"
|
||||
# ansible_host: 192.168.20.28
|
||||
# ansible_port: 22
|
||||
# ansible_ssh_private_key_file: "{{ pk_path }}"
|
||||
# ansible_become_pass: "{{ vault.pve.lulu.root.sudo }}"
|
||||
#
|
||||
# host:
|
||||
# hostname: "lulu"
|
||||
# ip: "{{ ansible_host }}"
|
||||
@@ -1,20 +0,0 @@
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 202.61.207.139
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: '{{ pk_path }}'
|
||||
ansible_become_pass: '{{ vault.mii.sudo }}'
|
||||
|
||||
host:
|
||||
hostname: "mii"
|
||||
ip: "192.168.200.2"
|
||||
backblaze:
|
||||
account: "{{ vault.mii.backblaze.account }}"
|
||||
key: "{{ vault.mii.backblaze.key }}"
|
||||
remote: "remote:mii-tudattr-dev"
|
||||
password: "{{ vault.mii.rclone.password }}"
|
||||
password2: "{{ vault.mii.rclone.password2 }}"
|
||||
paths:
|
||||
- "{{ docker_compose_dir }}"
|
||||
- "{{ docker_dir }}"
|
||||
fstab:
|
||||
mergerfs:
|
||||
@@ -1,23 +0,0 @@
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.13
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: '{{ pk_path }}'
|
||||
ansible_become_pass: '{{ vault.naruto.sudo }}'
|
||||
|
||||
host:
|
||||
hostname: "naruto"
|
||||
ip: "{{ ansible_host }}"
|
||||
backblaze:
|
||||
account: "{{ vault.naruto.backblaze.account }}"
|
||||
key: "{{ vault.naruto.backblaze.key }}"
|
||||
remote: "remote:naruto-tudattr-dev"
|
||||
password: "{{ vault.naruto.rclone.password }}"
|
||||
password2: "{{ vault.naruto.rclone.password2 }}"
|
||||
paths:
|
||||
- "{{ docker_compose_dir }}"
|
||||
- "{{ docker_dir }}"
|
||||
fstab:
|
||||
mergerfs:
|
||||
gitea:
|
||||
runner:
|
||||
token: "{{ vault.naruto.gitea.runner.token }}"
|
||||
@@ -1,23 +0,0 @@
|
||||
ansible_user: "{{ user }}"
|
||||
ansible_host: 192.168.20.11
|
||||
ansible_port: 22
|
||||
ansible_ssh_private_key_file: '{{ pk_path }}'
|
||||
ansible_become_pass: '{{ vault.pi.sudo }}'
|
||||
|
||||
host:
|
||||
hostname: "pi"
|
||||
ip: "{{ ansible_host }}"
|
||||
backblaze:
|
||||
account: "{{ vault.pi.backblaze.account }}"
|
||||
key: "{{ vault.pi.backblaze.key }}"
|
||||
remote: "remote:pi-tudattr-dev"
|
||||
password: "{{ vault.pi.rclone.password }}"
|
||||
password2: "{{ vault.pi.rclone.password2 }}"
|
||||
paths:
|
||||
- "{{ docker_compose_dir }}"
|
||||
- "{{ docker_dir }}"
|
||||
fstab:
|
||||
mergerfs:
|
||||
gitea:
|
||||
runner:
|
||||
token: "{{ vault.pi.gitea.runner.token }}"
|
||||
17
pi.yml
17
pi.yml
@@ -1,17 +0,0 @@
|
||||
---
|
||||
- name: Set up Raspberry Pis
|
||||
hosts: pi
|
||||
gather_facts: yes
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: backblaze
|
||||
tags:
|
||||
- backblaze
|
||||
- role: node_exporter
|
||||
tags:
|
||||
- node_exporter
|
||||
- role: docker
|
||||
tags:
|
||||
- docker
|
||||
@@ -1,20 +1,19 @@
|
||||
---
|
||||
- name: Set up Servers
|
||||
hosts: mii
|
||||
hosts: db
|
||||
gather_facts: yes
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: backblaze
|
||||
- role: postgres
|
||||
tags:
|
||||
- backblaze
|
||||
- postgres
|
||||
- role: node_exporter
|
||||
tags:
|
||||
- node_exporter
|
||||
- role: docker
|
||||
- role: postgres_exporter
|
||||
tags:
|
||||
- docker
|
||||
- role: wireguard
|
||||
tags:
|
||||
- wireguard
|
||||
- postgres_exporter
|
||||
13
playbooks/docker-host.yml
Normal file
13
playbooks/docker-host.yml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
- name: Set up Servers
|
||||
hosts: docker_host
|
||||
gather_facts: true
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: docker_host
|
||||
tags:
|
||||
- docker_host
|
||||
15
playbooks/docker-lb.yml
Normal file
15
playbooks/docker-lb.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
- name: Set up reverse proxy for docker
|
||||
hosts: docker
|
||||
gather_facts: true
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
when: inventory_hostname in groups["docker_lb"]
|
||||
- role: reverse_proxy
|
||||
tags:
|
||||
- reverse_proxy
|
||||
when: inventory_hostname in groups["docker_lb"]
|
||||
5
playbooks/docker.yml
Normal file
5
playbooks/docker.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
- name: Setup Docker Hosts
|
||||
ansible.builtin.import_playbook: docker-host.yml
|
||||
- name: Setup Docker load balancer
|
||||
ansible.builtin.import_playbook: docker-lb.yml
|
||||
31
playbooks/k3s-agents.yml
Normal file
31
playbooks/k3s-agents.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
- name: Set up Agents
|
||||
hosts: k3s_nodes
|
||||
gather_facts: yes
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
pre_tasks:
|
||||
- name: Get K3s token from the first server
|
||||
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
|
||||
slurp:
|
||||
src: /var/lib/rancher/k3s/server/node-token
|
||||
register: k3s_token
|
||||
become: true
|
||||
|
||||
- name: Set fact on k3s.server.ips[0]
|
||||
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
|
||||
set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
|
||||
|
||||
roles:
|
||||
- role: common
|
||||
when: inventory_hostname in groups["k3s_agent"]
|
||||
tags:
|
||||
- common
|
||||
- role: k3s_agent
|
||||
when: inventory_hostname in groups["k3s_agent"]
|
||||
k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
|
||||
tags:
|
||||
- k3s_agent
|
||||
- role: node_exporter
|
||||
when: inventory_hostname in groups["k3s_agent"]
|
||||
tags:
|
||||
- node_exporter
|
||||
@@ -1,14 +1,16 @@
|
||||
---
|
||||
- name: Set up Servers
|
||||
hosts: inko
|
||||
hosts: k3s_server
|
||||
gather_facts: yes
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: power_management
|
||||
- role: k3s_server
|
||||
tags:
|
||||
- power_management
|
||||
- k3s_server
|
||||
- role: node_exporter
|
||||
tags:
|
||||
- node_exporter
|
||||
31
playbooks/k3s-storage.yml
Normal file
31
playbooks/k3s-storage.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
- name: Set up storage
|
||||
hosts: k3s_nodes
|
||||
gather_facts: yes
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
pre_tasks:
|
||||
- name: Get K3s token from the first server
|
||||
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
|
||||
slurp:
|
||||
src: /var/lib/rancher/k3s/server/node-token
|
||||
register: k3s_token
|
||||
become: true
|
||||
|
||||
- name: Set fact on k3s.server.ips[0]
|
||||
when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
|
||||
set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
|
||||
|
||||
roles:
|
||||
- role: common
|
||||
when: inventory_hostname in groups["k3s_storage"]
|
||||
tags:
|
||||
- common
|
||||
- role: k3s_storage
|
||||
when: inventory_hostname in groups["k3s_storage"]
|
||||
k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
|
||||
tags:
|
||||
- k3s_storage
|
||||
- role: node_exporter
|
||||
when: inventory_hostname in groups["k3s_storage"]
|
||||
tags:
|
||||
- node_exporter
|
||||
@@ -1,17 +1,16 @@
|
||||
---
|
||||
- name: Set up Servers
|
||||
hosts: naruto
|
||||
hosts: loadbalancer
|
||||
gather_facts: yes
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
- role: samba
|
||||
- role: loadbalancer
|
||||
tags:
|
||||
- samba
|
||||
- loadbalancer
|
||||
- role: node_exporter
|
||||
tags:
|
||||
- node_exporter
|
||||
- role: smart_exporter
|
||||
tags:
|
||||
- smart_exporter
|
||||
17
playbooks/proxmox.yml
Normal file
17
playbooks/proxmox.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
- name: Run proxmox vm playbook
|
||||
hosts: proxmox
|
||||
gather_facts: true
|
||||
vars_files:
|
||||
- secrets.yml
|
||||
vars:
|
||||
is_localhost: "{{ inventory_hostname == '127.0.0.1' }}"
|
||||
is_proxmox_node: "{{ 'proxmox_nodes' in group_names }}"
|
||||
roles:
|
||||
- role: common
|
||||
tags:
|
||||
- common
|
||||
when: not is_localhost
|
||||
- role: proxmox
|
||||
tags:
|
||||
- proxmox
|
||||
@@ -1,9 +0,0 @@
|
||||
[server]
|
||||
aya01
|
||||
|
||||
[raspberry]
|
||||
pi
|
||||
naruto
|
||||
|
||||
[vps]
|
||||
mii
|
||||
63
production.ini
Normal file
63
production.ini
Normal file
@@ -0,0 +1,63 @@
|
||||
[proxmox]
|
||||
127.0.0.1 ansible_connection=local
|
||||
|
||||
[proxmox:children]
|
||||
proxmox_nodes
|
||||
|
||||
[proxmox_nodes]
|
||||
aya01
|
||||
lulu
|
||||
inko
|
||||
naruto01
|
||||
|
||||
[k3s]
|
||||
k3s-postgres
|
||||
k3s-loadbalancer
|
||||
k3s-server[00:02]
|
||||
k3s-agent[00:02]
|
||||
k3s-longhorn[00:02]
|
||||
|
||||
[vm]
|
||||
k3s-postgres
|
||||
k3s-loadbalancer
|
||||
k3s-agent[00:02]
|
||||
k3s-server[00:02]
|
||||
k3s-longhorn[00:02]
|
||||
# docker-host[00:01]
|
||||
|
||||
[k3s_nodes]
|
||||
k3s-server[00:02]
|
||||
k3s-agent[00:02]
|
||||
k3s-longhorn[00:02]
|
||||
|
||||
[docker]
|
||||
docker-host01
|
||||
docker-host10
|
||||
docker-host12
|
||||
docker-lb
|
||||
|
||||
[vps]
|
||||
mii
|
||||
|
||||
[k3s_server]
|
||||
k3s-server[00:02]
|
||||
|
||||
[k3s_agent]
|
||||
k3s-agent[00:02]
|
||||
|
||||
[k3s_storage]
|
||||
k3s-longhorn[00:02]
|
||||
|
||||
[db]
|
||||
k3s-postgres
|
||||
|
||||
[loadbalancer]
|
||||
k3s-loadbalancer
|
||||
|
||||
[docker_host]
|
||||
docker-host01
|
||||
docker-host10
|
||||
docker-host12
|
||||
|
||||
[docker_lb]
|
||||
docker-lb
|
||||
7
requirements.txt
Normal file
7
requirements.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
certifi==2025.1.31
|
||||
charset-normalizer==3.4.1
|
||||
idna==3.10
|
||||
nc-dnsapi==0.1.3
|
||||
proxmoxer==2.2.0
|
||||
requests==2.32.3
|
||||
urllib3==2.3.0
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
- name: Shut down docker
|
||||
systemd:
|
||||
name: docker
|
||||
state: stopped
|
||||
become: true
|
||||
|
||||
# - name: Backing up for "{{ inventory_hostname }}"
|
||||
# shell:
|
||||
# cmd: "rclone sync {{ item }} secret:{{ item }} --transfers 16"
|
||||
# loop: "{{ host.backblaze.paths }}"
|
||||
# become: true
|
||||
|
||||
- name: Backing up for "{{ inventory_hostname }}"
|
||||
shell:
|
||||
cmd: "rclone sync {{ item }} secret:{{ item }} --skip-links"
|
||||
loop: "{{ host.backblaze.paths }}"
|
||||
become: true
|
||||
|
||||
- name: Restart docker
|
||||
systemd:
|
||||
name: docker
|
||||
state: started
|
||||
become: true
|
||||
@@ -1,18 +0,0 @@
|
||||
---
|
||||
- name: Create rclone config folder
|
||||
file:
|
||||
path: "{{ rclone_config }}"
|
||||
owner: '0'
|
||||
group: '0'
|
||||
mode: '700'
|
||||
state: directory
|
||||
become: true
|
||||
|
||||
- name: Copy "rclone.conf"
|
||||
template:
|
||||
src: "rclone.conf.j2"
|
||||
dest: "{{ rclone_config }}/rclone.conf"
|
||||
owner: '0'
|
||||
group: '0'
|
||||
mode: '400'
|
||||
become: true
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
- name: Update and upgrade packages
|
||||
apt:
|
||||
update_cache: true
|
||||
upgrade: true
|
||||
autoremove: true
|
||||
become: true
|
||||
|
||||
- name: Install rclone
|
||||
apt:
|
||||
name: "rclone"
|
||||
state: present
|
||||
become: true
|
||||
@@ -1,5 +0,0 @@
|
||||
---
|
||||
|
||||
- include_tasks: install.yml
|
||||
- include_tasks: config.yml
|
||||
- include_tasks: backup.yml
|
||||
@@ -1,10 +0,0 @@
|
||||
[remote]
|
||||
type = b2
|
||||
account = {{ host.backblaze.account }}
|
||||
key = {{ host.backblaze.key }}
|
||||
|
||||
[secret]
|
||||
type = crypt
|
||||
remote = {{ host.backblaze.remote }}
|
||||
password = {{ host.backblaze.password }}
|
||||
password2 = {{ host.backblaze.password2 }}
|
||||
4
roles/common/files/bash/bash_aliases
Normal file
4
roles/common/files/bash/bash_aliases
Normal file
@@ -0,0 +1,4 @@
|
||||
alias cat=batcat
|
||||
alias vim=nvim
|
||||
alias fd=fdfind
|
||||
alias ls=eza
|
||||
@@ -1,7 +1,7 @@
|
||||
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
case $- in
|
||||
*i*) ;;
|
||||
*) return;;
|
||||
*i*) ;;
|
||||
*) return ;;
|
||||
esac
|
||||
HISTCONTROL=ignoreboth
|
||||
shopt -s histappend
|
||||
@@ -9,39 +9,38 @@ HISTSIZE=1000
|
||||
HISTFILESIZE=2000
|
||||
shopt -s checkwinsize
|
||||
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
|
||||
debian_chroot=$(cat /etc/debian_chroot)
|
||||
debian_chroot=$(cat /etc/debian_chroot)
|
||||
fi
|
||||
case "$TERM" in
|
||||
xterm-color|*-256color) color_prompt=yes;;
|
||||
xterm-color | *-256color) color_prompt=yes ;;
|
||||
esac
|
||||
if [ -n "$force_color_prompt" ]; then
|
||||
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
|
||||
color_prompt=yes
|
||||
else
|
||||
color_prompt=
|
||||
fi
|
||||
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
|
||||
color_prompt=yes
|
||||
else
|
||||
color_prompt=
|
||||
fi
|
||||
fi
|
||||
if [ "$color_prompt" = yes ]; then
|
||||
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
|
||||
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
|
||||
else
|
||||
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
|
||||
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
|
||||
fi
|
||||
unset color_prompt force_color_prompt
|
||||
case "$TERM" in
|
||||
xterm*|rxvt*)
|
||||
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
xterm* | rxvt*)
|
||||
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
|
||||
;;
|
||||
*) ;;
|
||||
esac
|
||||
|
||||
if [ -x /usr/bin/dircolors ]; then
|
||||
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
|
||||
alias ls='ls --color=auto'
|
||||
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
|
||||
alias ls='ls --color=auto'
|
||||
fi
|
||||
|
||||
if [ -f ~/.bash_aliases ]; then
|
||||
. ~/.bash_aliases
|
||||
. ~/.bash_aliases
|
||||
fi
|
||||
|
||||
if ! shopt -oq posix; then
|
||||
@@ -52,5 +51,6 @@ if ! shopt -oq posix; then
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
. "$HOME/.cargo/env"
|
||||
if [ -f /etc/profile ]; then
|
||||
. /etc/profile
|
||||
fi
|
||||
80
roles/common/files/ghostty/infocmp
Normal file
80
roles/common/files/ghostty/infocmp
Normal file
@@ -0,0 +1,80 @@
|
||||
xterm-ghostty|ghostty|Ghostty,
|
||||
am, bce, ccc, hs, km, mc5i, mir, msgr, npc, xenl, AX, Su, Tc, XT, fullkbd,
|
||||
colors#0x100, cols#80, it#8, lines#24, pairs#0x7fff,
|
||||
acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
|
||||
bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, civis=\E[?25l,
|
||||
clear=\E[H\E[2J, cnorm=\E[?12l\E[?25h, cr=\r,
|
||||
csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
|
||||
cud=\E[%p1%dB, cud1=\n, cuf=\E[%p1%dC, cuf1=\E[C,
|
||||
cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
|
||||
cvvis=\E[?12;25h, dch=\E[%p1%dP, dch1=\E[P, dim=\E[2m,
|
||||
dl=\E[%p1%dM, dl1=\E[M, dsl=\E]2;\007, ech=\E[%p1%dX,
|
||||
ed=\E[J, el=\E[K, el1=\E[1K, flash=\E[?5h$<100/>\E[?5l,
|
||||
fsl=^G, home=\E[H, hpa=\E[%i%p1%dG, ht=^I, hts=\EH,
|
||||
ich=\E[%p1%d@, ich1=\E[@, il=\E[%p1%dL, il1=\E[L, ind=\n,
|
||||
indn=\E[%p1%dS,
|
||||
initc=\E]4;%p1%d;rgb:%p2%{255}%*%{1000}%/%2.2X/%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\E\\,
|
||||
invis=\E[8m, kDC=\E[3;2~, kEND=\E[1;2F, kHOM=\E[1;2H,
|
||||
kIC=\E[2;2~, kLFT=\E[1;2D, kNXT=\E[6;2~, kPRV=\E[5;2~,
|
||||
kRIT=\E[1;2C, kbs=^?, kcbt=\E[Z, kcub1=\EOD, kcud1=\EOB,
|
||||
kcuf1=\EOC, kcuu1=\EOA, kdch1=\E[3~, kend=\EOF, kent=\EOM,
|
||||
kf1=\EOP, kf10=\E[21~, kf11=\E[23~, kf12=\E[24~,
|
||||
kf13=\E[1;2P, kf14=\E[1;2Q, kf15=\E[1;2R, kf16=\E[1;2S,
|
||||
kf17=\E[15;2~, kf18=\E[17;2~, kf19=\E[18;2~, kf2=\EOQ,
|
||||
kf20=\E[19;2~, kf21=\E[20;2~, kf22=\E[21;2~,
|
||||
kf23=\E[23;2~, kf24=\E[24;2~, kf25=\E[1;5P, kf26=\E[1;5Q,
|
||||
kf27=\E[1;5R, kf28=\E[1;5S, kf29=\E[15;5~, kf3=\EOR,
|
||||
kf30=\E[17;5~, kf31=\E[18;5~, kf32=\E[19;5~,
|
||||
kf33=\E[20;5~, kf34=\E[21;5~, kf35=\E[23;5~,
|
||||
kf36=\E[24;5~, kf37=\E[1;6P, kf38=\E[1;6Q, kf39=\E[1;6R,
|
||||
kf4=\EOS, kf40=\E[1;6S, kf41=\E[15;6~, kf42=\E[17;6~,
|
||||
kf43=\E[18;6~, kf44=\E[19;6~, kf45=\E[20;6~,
|
||||
kf46=\E[21;6~, kf47=\E[23;6~, kf48=\E[24;6~,
|
||||
kf49=\E[1;3P, kf5=\E[15~, kf50=\E[1;3Q, kf51=\E[1;3R,
|
||||
kf52=\E[1;3S, kf53=\E[15;3~, kf54=\E[17;3~,
|
||||
kf55=\E[18;3~, kf56=\E[19;3~, kf57=\E[20;3~,
|
||||
kf58=\E[21;3~, kf59=\E[23;3~, kf6=\E[17~, kf60=\E[24;3~,
|
||||
kf61=\E[1;4P, kf62=\E[1;4Q, kf63=\E[1;4R, kf7=\E[18~,
|
||||
kf8=\E[19~, kf9=\E[20~, khome=\EOH, kich1=\E[2~,
|
||||
kind=\E[1;2B, kmous=\E[<, knp=\E[6~, kpp=\E[5~,
|
||||
kri=\E[1;2A, oc=\E]104\007, op=\E[39;49m, rc=\E8,
|
||||
rep=%p1%c\E[%p2%{1}%-%db, rev=\E[7m, ri=\EM,
|
||||
rin=\E[%p1%dT, ritm=\E[23m, rmacs=\E(B, rmam=\E[?7l,
|
||||
rmcup=\E[?1049l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
|
||||
rmul=\E[24m, rs1=\E]\E\\\Ec, sc=\E7,
|
||||
setab=\E[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m,
|
||||
setaf=\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m,
|
||||
sgr=%?%p9%t\E(0%e\E(B%;\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m,
|
||||
sgr0=\E(B\E[m, sitm=\E[3m, smacs=\E(0, smam=\E[?7h,
|
||||
smcup=\E[?1049h, smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m,
|
||||
smul=\E[4m, tbc=\E[3g, tsl=\E]2;, u6=\E[%i%d;%dR, u7=\E[6n,
|
||||
u8=\E[?%[;0123456789]c, u9=\E[c, vpa=\E[%i%p1%dd,
|
||||
BD=\E[?2004l, BE=\E[?2004h, Clmg=\E[s,
|
||||
Cmg=\E[%i%p1%d;%p2%ds, Dsmg=\E[?69l, E3=\E[3J,
|
||||
Enmg=\E[?69h, Ms=\E]52;%p1%s;%p2%s\007, PE=\E[201~,
|
||||
PS=\E[200~, RV=\E[>c, Se=\E[2 q,
|
||||
Setulc=\E[58:2::%p1%{65536}%/%d:%p1%{256}%/%{255}%&%d:%p1%{255}%&%d%;m,
|
||||
Smulx=\E[4:%p1%dm, Ss=\E[%p1%d q,
|
||||
Sync=\E[?2026%?%p1%{1}%-%tl%eh%;,
|
||||
XM=\E[?1006;1000%?%p1%{1}%=%th%el%;, XR=\E[>0q,
|
||||
fd=\E[?1004l, fe=\E[?1004h, kDC3=\E[3;3~, kDC4=\E[3;4~,
|
||||
kDC5=\E[3;5~, kDC6=\E[3;6~, kDC7=\E[3;7~, kDN=\E[1;2B,
|
||||
kDN3=\E[1;3B, kDN4=\E[1;4B, kDN5=\E[1;5B, kDN6=\E[1;6B,
|
||||
kDN7=\E[1;7B, kEND3=\E[1;3F, kEND4=\E[1;4F,
|
||||
kEND5=\E[1;5F, kEND6=\E[1;6F, kEND7=\E[1;7F,
|
||||
kHOM3=\E[1;3H, kHOM4=\E[1;4H, kHOM5=\E[1;5H,
|
||||
kHOM6=\E[1;6H, kHOM7=\E[1;7H, kIC3=\E[2;3~, kIC4=\E[2;4~,
|
||||
kIC5=\E[2;5~, kIC6=\E[2;6~, kIC7=\E[2;7~, kLFT3=\E[1;3D,
|
||||
kLFT4=\E[1;4D, kLFT5=\E[1;5D, kLFT6=\E[1;6D,
|
||||
kLFT7=\E[1;7D, kNXT3=\E[6;3~, kNXT4=\E[6;4~,
|
||||
kNXT5=\E[6;5~, kNXT6=\E[6;6~, kNXT7=\E[6;7~,
|
||||
kPRV3=\E[5;3~, kPRV4=\E[5;4~, kPRV5=\E[5;5~,
|
||||
kPRV6=\E[5;6~, kPRV7=\E[5;7~, kRIT3=\E[1;3C,
|
||||
kRIT4=\E[1;4C, kRIT5=\E[1;5C, kRIT6=\E[1;6C,
|
||||
kRIT7=\E[1;7C, kUP=\E[1;2A, kUP3=\E[1;3A, kUP4=\E[1;4A,
|
||||
kUP5=\E[1;5A, kUP6=\E[1;6A, kUP7=\E[1;7A, kxIN=\E[I,
|
||||
kxOUT=\E[O, rmxx=\E[29m, rv=\E\\[[0-9]+;[0-9]+;[0-9]+c,
|
||||
setrgbb=\E[48:2:%p1%d:%p2%d:%p3%dm,
|
||||
setrgbf=\E[38:2:%p1%d:%p2%d:%p3%dm, smxx=\E[9m,
|
||||
xm=\E[<%i%p3%d;%p1%d;%p2%d;%?%p4%tM%em%;,
|
||||
xr=\EP>\\|[ -~]+a\E\\,
|
||||
19
roles/common/files/ssh/root/sshd_config
Normal file
19
roles/common/files/ssh/root/sshd_config
Normal file
@@ -0,0 +1,19 @@
|
||||
Protocol 2
|
||||
PermitRootLogin yes
|
||||
MaxAuthTries 3
|
||||
PubkeyAuthentication yes
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
UsePAM yes
|
||||
AllowAgentForwarding no
|
||||
AllowTcpForwarding yes
|
||||
X11Forwarding no
|
||||
PrintMotd no
|
||||
TCPKeepAlive no
|
||||
ClientAliveCountMax 2
|
||||
TrustedUserCAKeys /etc/ssh/vault-ca.pub
|
||||
UseDNS yes
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
||||
|
||||
18
roles/common/files/ssh/user/sshd_config
Normal file
18
roles/common/files/ssh/user/sshd_config
Normal file
@@ -0,0 +1,18 @@
|
||||
Protocol 2
|
||||
PermitRootLogin no
|
||||
MaxAuthTries 3
|
||||
PubkeyAuthentication yes
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
UsePAM yes
|
||||
AllowAgentForwarding no
|
||||
AllowTcpForwarding no
|
||||
X11Forwarding no
|
||||
PrintMotd no
|
||||
TCPKeepAlive no
|
||||
ClientAliveCountMax 2
|
||||
TrustedUserCAKeys /etc/ssh/vault-ca.pub
|
||||
UseDNS yes
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
||||
1
roles/common/files/ssh/vault-ca.pub
Normal file
1
roles/common/files/ssh/vault-ca.pub
Normal file
@@ -0,0 +1 @@
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxIbkko72kVSfYDjJpiMH9SjHUGqBn3MbBvmotsPQhybFgnnkBpX/3fM9olP+Z6PGsmbOEs0fOjPS6uY5hjKcKsyHdZfS6cA4wjY/DL8fwATAW5FCDBtMpdg2/sb8j9jutHHs4sQeRBolVwKcv+ZAaJNnOzNHwxVUfT9bNwShthnAFjkY7oZo657FRomlkDJjmGQuratP0veKA8jYzqqPWwWidTGQerLYTyJ3Z8pbQa5eN7svrvabjjDLbVTDESE8st9WEmwvAwoj7Kz+WovCy0Uz7LRFVmaRiapM8SXtPPUC0xfyzAB3NxwBtxizdUMlShvLcL6cujcUBMulVMpsqEaOESTpmVTrMJhnJPZG/3j9ziGoYIa6hMj1J9/qLQ5dDNVVXMxw99G31x0LJoy12IE90P4Cahux8iN0Cp4oB4+B6/qledxs1fcRzsnQY/ickjKhqcJwgHzsnwjDkeYRaYte5x4f/gJ77kA20nPto7mxr2mhWot/i9B1KlMURVXOH/q4nrzhJ0hPJpM0UtzQ58TmzE4Osf/B5yoe8V//6XnelbmG/nKCIzg12d7PvaLjbFMn8IgOwDMRlip+vpyadRr/+pCawrfo4vLF7BsnJ84aoByIpbwaysgaYHtjfZWImorMVkgviC4O6Hn9/ZiLNze2A9DaNUnLVJ0nYNbmv9Q==
|
||||
6
roles/common/handlers/main.yml
Normal file
6
roles/common/handlers/main.yml
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: Restart sshd
|
||||
service:
|
||||
name: sshd
|
||||
state: restarted
|
||||
become: yes
|
||||
@@ -1,10 +1,24 @@
|
||||
---
|
||||
- name: Copy .bashrc
|
||||
template:
|
||||
src: templates/common/bash/bashrc.j2
|
||||
dest: "/home/{{ user }}/.bashrc"
|
||||
owner: "{{ user }}"
|
||||
group: "{{ user }}"
|
||||
mode: 0644
|
||||
become: yes
|
||||
register: sshd
|
||||
- name: Copy bash-configs
|
||||
ansible.builtin.template:
|
||||
src: "files/bash/{{ item }}"
|
||||
dest: "{{ ansible_env.HOME }}/.{{ item }}"
|
||||
owner: "{{ ansible_user_id }}"
|
||||
group: "{{ ansible_user_id }}"
|
||||
mode: "644"
|
||||
loop:
|
||||
- bashrc
|
||||
- bash_aliases
|
||||
|
||||
- name: Copy ghostty infocmp
|
||||
ansible.builtin.copy:
|
||||
src: files/ghostty/infocmp
|
||||
dest: "{{ ansible_env.HOME }}/ghostty"
|
||||
owner: "{{ ansible_user_id }}"
|
||||
group: "{{ ansible_user_id }}"
|
||||
mode: "0644"
|
||||
register: ghostty_terminfo
|
||||
|
||||
- name: Compile ghostty terminalinfo
|
||||
ansible.builtin.command: "tic -x {{ ansible_env.HOME }}/ghostty"
|
||||
when: ghostty_terminfo.changed
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
- name: Update and upgrade packages
|
||||
apt:
|
||||
update_cache: yes
|
||||
upgrade: yes
|
||||
autoremove: yes
|
||||
become: yes
|
||||
|
||||
- name: Install extra packages
|
||||
apt:
|
||||
name: "{{ common_packages }}"
|
||||
state: present
|
||||
become: yes
|
||||
94
roles/common/tasks/extra_packages.yml
Normal file
94
roles/common/tasks/extra_packages.yml
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
- name: Ensure /etc/apt/keyrings directory exists
|
||||
ansible.builtin.file:
|
||||
path: /etc/apt/keyrings
|
||||
state: directory
|
||||
mode: "0755"
|
||||
become: true
|
||||
|
||||
- name: Download and save Gierens repository GPG key
|
||||
ansible.builtin.get_url:
|
||||
url: https://raw.githubusercontent.com/eza-community/eza/main/deb.asc
|
||||
dest: /etc/apt/keyrings/gierens.asc
|
||||
mode: "0644"
|
||||
become: true
|
||||
|
||||
- name: Add Gierens repository to apt sources
|
||||
ansible.builtin.apt_repository:
|
||||
repo: "deb [signed-by=/etc/apt/keyrings/gierens.asc] http://deb.gierens.de stable main"
|
||||
state: present
|
||||
update_cache: true
|
||||
become: true
|
||||
|
||||
- name: Install eza package
|
||||
ansible.builtin.apt:
|
||||
name: eza
|
||||
state: present
|
||||
become: true
|
||||
|
||||
- name: Install bottom package
|
||||
ansible.builtin.apt:
|
||||
deb: https://github.com/ClementTsang/bottom/releases/download/0.9.6/bottom_0.9.6_amd64.deb
|
||||
state: present
|
||||
become: true
|
||||
|
||||
- name: Check if Neovim is already installed
|
||||
ansible.builtin.command: "which nvim"
|
||||
register: neovim_installed
|
||||
changed_when: false
|
||||
ignore_errors: true
|
||||
|
||||
- name: Download Neovim AppImage
|
||||
ansible.builtin.get_url:
|
||||
url: https://github.com/neovim/neovim/releases/download/v0.10.0/nvim.appimage
|
||||
dest: /tmp/nvim.appimage
|
||||
mode: "0755"
|
||||
when: neovim_installed.rc != 0
|
||||
register: download_result
|
||||
|
||||
- name: Extract Neovim AppImage
|
||||
ansible.builtin.command:
|
||||
cmd: "./nvim.appimage --appimage-extract"
|
||||
chdir: /tmp
|
||||
when: download_result.changed
|
||||
register: extract_result
|
||||
|
||||
- name: Copy extracted Neovim files to /usr
|
||||
ansible.builtin.copy:
|
||||
src: /tmp/squashfs-root/usr/
|
||||
dest: /usr/
|
||||
remote_src: true
|
||||
mode: "0755"
|
||||
become: true
|
||||
when: extract_result.changed
|
||||
|
||||
- name: Clean up extracted Neovim files
|
||||
ansible.builtin.file:
|
||||
path: /tmp/squashfs-root
|
||||
state: absent
|
||||
when: extract_result.changed
|
||||
|
||||
- name: Remove Neovim AppImage
|
||||
ansible.builtin.file:
|
||||
path: /tmp/nvim.appimage
|
||||
state: absent
|
||||
when: download_result.changed
|
||||
|
||||
- name: Check if Neovim config directory already exists
|
||||
ansible.builtin.stat:
|
||||
path: ~/.config/nvim
|
||||
register: nvim_config
|
||||
|
||||
- name: Clone LazyVim starter to Neovim config directory
|
||||
ansible.builtin.git:
|
||||
repo: https://github.com/LazyVim/starter
|
||||
dest: ~/.config/nvim
|
||||
clone: true
|
||||
update: false
|
||||
when: not nvim_config.stat.exists
|
||||
|
||||
- name: Remove .git directory from Neovim config
|
||||
ansible.builtin.file:
|
||||
path: ~/.config/nvim/.git
|
||||
state: absent
|
||||
when: not nvim_config.stat.exists
|
||||
@@ -1,42 +0,0 @@
|
||||
---
|
||||
- name: Install dependencies
|
||||
apt:
|
||||
name: "mergerfs"
|
||||
state: present
|
||||
become: yes
|
||||
|
||||
- name: Create mount folders
|
||||
file:
|
||||
path: "{{ item.path }}"
|
||||
state: directory
|
||||
loop: "{{ host.fstab if host.fstab is iterable else []}}"
|
||||
become: true
|
||||
|
||||
- name: Create fstab entries
|
||||
mount:
|
||||
src: "UUID={{ item.uuid }}"
|
||||
path: "{{ item.path }}"
|
||||
fstype: "{{ item.type }}"
|
||||
state: present
|
||||
backup: true
|
||||
loop: "{{ host.fstab if host.fstab is iterable else []}}"
|
||||
become: true
|
||||
register: fstab
|
||||
|
||||
- name: Create/mount mergerfs
|
||||
mount:
|
||||
src: "{{ item.branches | join(':') }}"
|
||||
path: "{{ item.path }}"
|
||||
fstype: "{{ item.type }}"
|
||||
opts: "{{ item.opts | join(',') }}"
|
||||
state: present
|
||||
backup: true
|
||||
become: true
|
||||
loop: "{{ host.mergerfs if host.mergerfs is iterable else []}}"
|
||||
register: fstab
|
||||
|
||||
- name: Mount all disks
|
||||
command: mount -a
|
||||
become: true
|
||||
when: fstab.changed
|
||||
|
||||
14
roles/common/tasks/hostname.yml
Normal file
14
roles/common/tasks/hostname.yml
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
- name: Set a hostname
|
||||
ansible.builtin.hostname:
|
||||
name: "{{ inventory_hostname }}"
|
||||
become: true
|
||||
|
||||
- name: Update /etc/hosts to reflect the new hostname
|
||||
ansible.builtin.lineinfile:
|
||||
path: /etc/hosts
|
||||
regexp: '^127\.0\.1\.1'
|
||||
line: "127.0.1.1 {{ inventory_hostname }}"
|
||||
state: present
|
||||
backup: true
|
||||
become: true
|
||||
@@ -1,6 +1,13 @@
|
||||
---
|
||||
- include_tasks: time.yml
|
||||
- include_tasks: essential.yml
|
||||
- include_tasks: bash.yml
|
||||
- include_tasks: sshd.yml
|
||||
- include_tasks: fstab.yml
|
||||
- name: Configure Time
|
||||
ansible.builtin.include_tasks: time.yml
|
||||
- name: Configure Packages
|
||||
ansible.builtin.include_tasks: packages.yml
|
||||
- name: Configure Hostname
|
||||
ansible.builtin.include_tasks: hostname.yml
|
||||
- name: Configure Extra-Packages
|
||||
ansible.builtin.include_tasks: extra_packages.yml
|
||||
- name: Configure Bash
|
||||
ansible.builtin.include_tasks: bash.yml
|
||||
- name: Configure SSH
|
||||
ansible.builtin.include_tasks: sshd.yml
|
||||
|
||||
28
roles/common/tasks/packages.yml
Normal file
28
roles/common/tasks/packages.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
- name: Update and upgrade packages
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
upgrade: true
|
||||
autoremove: true
|
||||
become: true
|
||||
when: ansible_user_id != "root"
|
||||
|
||||
- name: Install base packages
|
||||
ansible.builtin.apt:
|
||||
name: "{{ common_packages }}"
|
||||
state: present
|
||||
become: true
|
||||
when: ansible_user_id != "root"
|
||||
|
||||
- name: Update and upgrade packages
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
upgrade: true
|
||||
autoremove: true
|
||||
when: ansible_user_id == "root"
|
||||
|
||||
- name: Install base packages
|
||||
ansible.builtin.apt:
|
||||
name: "{{ common_packages }}"
|
||||
state: present
|
||||
when: ansible_user_id == "root"
|
||||
@@ -1,23 +1,28 @@
|
||||
---
|
||||
- name: Copy sshd_config
|
||||
template:
|
||||
src: templates/common/ssh/sshd_config
|
||||
- name: Copy user sshd_config
|
||||
ansible.builtin.template:
|
||||
src: files/ssh/user/sshd_config
|
||||
dest: /etc/ssh/sshd_config
|
||||
mode: 0644
|
||||
become: yes
|
||||
register: sshd
|
||||
mode: "644"
|
||||
backup: true
|
||||
notify:
|
||||
- Restart sshd
|
||||
become: true
|
||||
when: ansible_user_id != "root"
|
||||
|
||||
- name: Copy root sshd_config
|
||||
ansible.builtin.template:
|
||||
src: files/ssh/root/sshd_config
|
||||
dest: /etc/ssh/sshd_config
|
||||
mode: "644"
|
||||
backup: true
|
||||
notify:
|
||||
- Restart sshd
|
||||
when: ansible_user_id == "root"
|
||||
|
||||
- name: Copy pubkey
|
||||
copy:
|
||||
content: "{{ pubkey }}"
|
||||
dest: "/home/{{ user }}/.ssh/authorized_keys"
|
||||
owner: "{{ user }}"
|
||||
group: "{{ user }}"
|
||||
ansible.builtin.copy:
|
||||
src: files/ssh/vault-ca.pub
|
||||
dest: "/etc/ssh/vault-ca.pub"
|
||||
mode: "644"
|
||||
|
||||
- name: Restart sshd
|
||||
service:
|
||||
name: "sshd"
|
||||
state: "restarted"
|
||||
become: yes
|
||||
when: sshd.changed
|
||||
become: true
|
||||
|
||||
@@ -2,3 +2,10 @@
|
||||
- name: Set timezone to "{{ timezone }}"
|
||||
community.general.timezone:
|
||||
name: "{{ timezone }}"
|
||||
become: true
|
||||
when: ansible_user_id != "root"
|
||||
|
||||
- name: Set timezone to "{{ timezone }}"
|
||||
community.general.timezone:
|
||||
name: "{{ timezone }}"
|
||||
when: ansible_user_id == "root"
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
# $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
|
||||
|
||||
# This is the sshd server system-wide configuration file. See
|
||||
# sshd_config(5) for more information.
|
||||
|
||||
# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
|
||||
|
||||
# The strategy used for options in the default sshd_config shipped with
|
||||
# OpenSSH is to specify options with their default value where
|
||||
# possible, but leave them commented. Uncommented options override the
|
||||
# default value.
|
||||
|
||||
Include /etc/ssh/sshd_config.d/*.conf
|
||||
|
||||
Protocol 2
|
||||
#Port 22
|
||||
#AddressFamily any
|
||||
#ListenAddress 0.0.0.0
|
||||
#ListenAddress ::
|
||||
|
||||
#HostKey /etc/ssh/ssh_host_rsa_key
|
||||
#HostKey /etc/ssh/ssh_host_ecdsa_key
|
||||
#HostKey /etc/ssh/ssh_host_ed25519_key
|
||||
|
||||
# Ciphers and keying
|
||||
#RekeyLimit default none
|
||||
|
||||
# Logging
|
||||
#SyslogFacility AUTH
|
||||
#LogLevel INFO
|
||||
|
||||
# Authentication:
|
||||
|
||||
#LoginGraceTime 2m
|
||||
PermitRootLogin no
|
||||
#StrictModes yes
|
||||
MaxAuthTries 3
|
||||
#MaxSessions 10
|
||||
|
||||
PubkeyAuthentication yes
|
||||
|
||||
# Expect .ssh/authorized_keys2 to be disregarded by default in future.
|
||||
#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
|
||||
|
||||
#AuthorizedPrincipalsFile none
|
||||
|
||||
#AuthorizedKeysCommand none
|
||||
#AuthorizedKeysCommandUser nobody
|
||||
|
||||
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
|
||||
#HostbasedAuthentication no
|
||||
# Change to yes if you don't trust ~/.ssh/known_hosts for
|
||||
# HostbasedAuthentication
|
||||
#IgnoreUserKnownHosts no
|
||||
# Don't read the user's ~/.rhosts and ~/.shosts files
|
||||
#IgnoreRhosts yes
|
||||
|
||||
# To disable tunneled clear text passwords, change to no here!
|
||||
PasswordAuthentication no
|
||||
PermitEmptyPasswords no
|
||||
|
||||
# Change to yes to enable challenge-response passwords (beware issues with
|
||||
# some PAM modules and threads)
|
||||
ChallengeResponseAuthentication no
|
||||
|
||||
# Kerberos options
|
||||
#KerberosAuthentication no
|
||||
#KerberosOrLocalPasswd yes
|
||||
#KerberosTicketCleanup yes
|
||||
#KerberosGetAFSToken no
|
||||
|
||||
# GSSAPI options
|
||||
#GSSAPIAuthentication no
|
||||
#GSSAPICleanupCredentials yes
|
||||
#GSSAPIStrictAcceptorCheck yes
|
||||
#GSSAPIKeyExchange no
|
||||
|
||||
# Set this to 'yes' to enable PAM authentication, account processing,
|
||||
# and session processing. If this is enabled, PAM authentication will
|
||||
# be allowed through the ChallengeResponseAuthentication and
|
||||
# PasswordAuthentication. Depending on your PAM configuration,
|
||||
# PAM authentication via ChallengeResponseAuthentication may bypass
|
||||
# the setting of "PermitRootLogin without-password".
|
||||
# If you just want the PAM account and session checks to run without
|
||||
# PAM authentication, then enable this but set PasswordAuthentication
|
||||
# and ChallengeResponseAuthentication to 'no'.
|
||||
UsePAM yes
|
||||
|
||||
AllowAgentForwarding no
|
||||
AllowTcpForwarding no
|
||||
#GatewayPorts no
|
||||
X11Forwarding no
|
||||
#X11DisplayOffset 10
|
||||
#X11UseLocalhost yes
|
||||
#PermitTTY yes
|
||||
PrintMotd no
|
||||
#PrintLastLog yes
|
||||
TCPKeepAlive no
|
||||
#PermitUserEnvironment no
|
||||
#Compression delayed
|
||||
#ClientAliveInterval 0
|
||||
ClientAliveCountMax 2
|
||||
UseDNS yes
|
||||
#PidFile /var/run/sshd.pid
|
||||
#MaxStartups 10:30:100
|
||||
#PermitTunnel no
|
||||
#ChrootDirectory none
|
||||
#VersionAddendum none
|
||||
|
||||
# no default banner path
|
||||
#Banner none
|
||||
|
||||
# Allow client to pass locale environment variables
|
||||
AcceptEnv LANG LC_*
|
||||
|
||||
# override default of no subsystems
|
||||
Subsystem sftp /usr/lib/openssh/sftp-server
|
||||
|
||||
# Example of overriding settings on a per-user basis
|
||||
#Match User anoncvs
|
||||
# X11Forwarding no
|
||||
# AllowTcpForwarding no
|
||||
# PermitTTY no
|
||||
# ForceCommand cvs server
|
||||
15
roles/common/vars/main.yml
Normal file
15
roles/common/vars/main.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
common_packages:
|
||||
- build-essential
|
||||
- curl
|
||||
- git
|
||||
- iperf3
|
||||
- neovim
|
||||
- rsync
|
||||
- smartmontools
|
||||
- sudo
|
||||
- systemd-timesyncd
|
||||
- tree
|
||||
- screen
|
||||
- bat
|
||||
- fd-find
|
||||
- ripgrep
|
||||
@@ -1,96 +0,0 @@
|
||||
---
|
||||
|
||||
# - include_tasks: zoneminder.yml
|
||||
# tags:
|
||||
# - zoneminder
|
||||
|
||||
- include_tasks: pihole.yml
|
||||
tags:
|
||||
- pihole
|
||||
|
||||
- include_tasks: syncthing.yml
|
||||
tags:
|
||||
- syncthing
|
||||
|
||||
# - include_tasks: softserve.yml
|
||||
# tags:
|
||||
# - softserve
|
||||
|
||||
- include_tasks: cupsd.yml
|
||||
tags:
|
||||
- cupsd
|
||||
|
||||
- include_tasks: kuma.yml
|
||||
tags:
|
||||
- kuma
|
||||
|
||||
# - include_tasks: traefik.yml
|
||||
# tags:
|
||||
# - traefik
|
||||
|
||||
- include_tasks: plex.yml
|
||||
tags:
|
||||
- plex
|
||||
- include_tasks: ddns.yml
|
||||
tags:
|
||||
- ddns
|
||||
|
||||
- include_tasks: homeassistant.yml
|
||||
tags:
|
||||
- homeassistant
|
||||
|
||||
- include_tasks: tautulli.yml
|
||||
tags:
|
||||
- tautulli
|
||||
|
||||
- include_tasks: sonarr.yml
|
||||
tags:
|
||||
- sonarr
|
||||
|
||||
- include_tasks: radarr.yml
|
||||
tags:
|
||||
- radarr
|
||||
|
||||
- include_tasks: lidarr.yml
|
||||
tags:
|
||||
- lidarr
|
||||
|
||||
- include_tasks: prowlarr.yml
|
||||
tags:
|
||||
- prowlarr
|
||||
|
||||
- include_tasks: bin.yml
|
||||
tags:
|
||||
- bin
|
||||
|
||||
- include_tasks: gluetun.yml
|
||||
tags:
|
||||
- gluetun
|
||||
|
||||
- include_tasks: qbit.yml
|
||||
tags:
|
||||
- qbit
|
||||
|
||||
- include_tasks: qbit_private.yml
|
||||
tags:
|
||||
- qbit_priv
|
||||
|
||||
- include_tasks: prometheus.yml
|
||||
tags:
|
||||
- prometheus
|
||||
|
||||
- include_tasks: grafana.yml
|
||||
tags:
|
||||
- grafana
|
||||
|
||||
- include_tasks: jellyfin.yml
|
||||
tags:
|
||||
- jellyfin
|
||||
|
||||
- include_tasks: gitea.yml
|
||||
tags:
|
||||
- gitea
|
||||
|
||||
- include_tasks: gitea-runner.yml
|
||||
tags:
|
||||
- gitea-runner
|
||||
@@ -1,9 +0,0 @@
|
||||
---
|
||||
- name: Create bin-config directory
|
||||
file:
|
||||
path: "{{ bin_upload }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
@@ -1,19 +0,0 @@
|
||||
---
|
||||
- name: Create cupsd-config directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ cupsd_config }}"
|
||||
become: true
|
||||
|
||||
- name: Copy cupsd config
|
||||
template:
|
||||
owner: "{{ puid }}"
|
||||
src: "templates/aya01/cupsd/cupsd.conf"
|
||||
dest: "{{ cupsd_config }}/cupsd.conf"
|
||||
mode: '660'
|
||||
become: true
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
- name: Create ddns-config directory
|
||||
file:
|
||||
path: "{{ docker_dir }}/ddns-updater/data/"
|
||||
owner: 1000
|
||||
group: 1000
|
||||
mode: '700'
|
||||
state: directory
|
||||
|
||||
- name: Copy ddns-config
|
||||
template:
|
||||
owner: 1000
|
||||
src: "templates/{{host.hostname}}/ddns-updater/data/config.json"
|
||||
dest: "{{ docker_dir }}/ddns-updater/data/config.json"
|
||||
mode: '400'
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Create gitea-runner directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ gitea.runner.volumes.data }}"
|
||||
@@ -1,12 +0,0 @@
|
||||
---
|
||||
- name: Create gitea directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ gitea.volumes.data }}"
|
||||
- "{{ gitea.volumes.config }}"
|
||||
@@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Create gitlab-runner directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ gitlab.runner.volumes.config }}"
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Create gitlab-config
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ gitlab.puid }}"
|
||||
group: "{{ gitlab.pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ gitlab.paths.config }}"
|
||||
- "{{ gitlab.paths.logs }}"
|
||||
- "{{ gitlab.paths.data }}"
|
||||
@@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Create gluetun-config directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '775'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ gluetun_config}}"
|
||||
become: true
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
- name: Create grafana data directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ grafana_puid }}"
|
||||
group: "{{ grafana_pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ grafana_data }}"
|
||||
- "{{ grafana_config }}"
|
||||
become: true
|
||||
|
||||
- name: Copy grafana config
|
||||
template:
|
||||
owner: "{{ grafana_puid }}"
|
||||
group: "{{ grafana_pgid }}"
|
||||
src: "templates/aya01/grafana/etc-grafana/grafana.ini.j2"
|
||||
dest: "{{ grafana_config }}/grafana.ini"
|
||||
mode: '644'
|
||||
become: true
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Create homeassistant-config directory
|
||||
file:
|
||||
path: "{{ ha_config }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: true
|
||||
@@ -1,30 +0,0 @@
|
||||
---
|
||||
- name: Create zoneminder user
|
||||
user:
|
||||
name: zm
|
||||
uid: 911
|
||||
shell: /bin/false
|
||||
become: true
|
||||
|
||||
- name: Create Zoneminder config directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: 911
|
||||
group: 911
|
||||
mode: '700'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ zoneminder_config }}"
|
||||
become: true
|
||||
|
||||
- name: Create Zoneminder data directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: 911
|
||||
group: 911
|
||||
mode: '755'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ zoneminder_data }}"
|
||||
become: true
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
---
|
||||
- name: Create jellyfin-config directory
|
||||
file:
|
||||
path: "{{ jellyfin.config }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
|
||||
- name: Create jellyfin-cache directory
|
||||
file:
|
||||
path: "{{ jellyfin.cache }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
|
||||
- name: Create jellyfin media directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ jellyfin.media.tv }}"
|
||||
- "{{ jellyfin.media.movies }}"
|
||||
- "{{ jellyfin.media.music }}"
|
||||
@@ -1,11 +0,0 @@
|
||||
---
|
||||
- name: Create kuma-config directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ kuma_config }}"
|
||||
become: true
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
- name: Create lidarr directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
become: yes
|
||||
loop:
|
||||
- "{{ lidarr_config }}"
|
||||
- "{{ lidarr_media }}"
|
||||
- "{{ lidarr_downloads }}"
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
- include_tasks: install.yml
|
||||
|
||||
- include_tasks: user_group_setup.yml
|
||||
|
||||
- name: Copy the compose file
|
||||
template:
|
||||
src: templates/{{ inventory_hostname }}/compose.yaml
|
||||
dest: "{{ docker_compose_dir }}/compose.yaml"
|
||||
register: compose
|
||||
|
||||
- include_tasks: "{{ inventory_hostname }}_compose.yml"
|
||||
tags:
|
||||
- reload_compose
|
||||
|
||||
- name: Update docker Images
|
||||
shell:
|
||||
cmd: "docker compose pull"
|
||||
chdir: "{{ docker_compose_dir }}"
|
||||
|
||||
- name: Rebuilding docker images
|
||||
shell:
|
||||
cmd: "docker compose up -d --build"
|
||||
chdir: "{{ docker_compose_dir }}"
|
||||
@@ -1,5 +0,0 @@
|
||||
---
|
||||
|
||||
- include_tasks: nginx-proxy-manager.yml
|
||||
tags:
|
||||
- nginx
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
|
||||
- include_tasks: nginx-proxy-manager.yml
|
||||
tags:
|
||||
- nginx
|
||||
|
||||
- include_tasks: pihole.yml
|
||||
tags:
|
||||
- pihole
|
||||
|
||||
- include_tasks: gitea-runner.yml
|
||||
tags:
|
||||
- gitea-runner
|
||||
@@ -1,14 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Create netdata dirs
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: 1000
|
||||
group: 1000
|
||||
mode: '777'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ netdata_config }}"
|
||||
- "{{ netdata_cache }}"
|
||||
- "{{ netdata_lib }}"
|
||||
become: true
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Create nginx-data directory
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
owner: "{{ puid }}"
|
||||
group: "{{ pgid }}"
|
||||
mode: '755'
|
||||
state: directory
|
||||
loop:
|
||||
- "{{ nginx.paths.letsencrypt }}"
|
||||
- "{{ nginx.paths.data }}"
|
||||
become: yes
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user