Added storage nodes for k3s

Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>

parent 92e4b3bb27
commit 7d58de98d9

@@ -3,7 +3,7 @@ db:
   user: "postgres"
   name: "k3s"
   user: "k3s"
-  password: "{{ vault.k3s.db.password }}"
+  password: "{{ vault.k3s.postgres.db.password }}"
   listen_address: "{{ k3s.db.ip }}"
 
 k3s:

@@ -3,7 +3,7 @@ ansible_user: "{{ user }}"
 ansible_host: 192.168.20.25
 ansible_port: 22
 ansible_ssh_private_key_file: "{{ pk_path }}"
-ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
+ansible_become_pass: "{{ vault.k3s.agent00.sudo }}"
 
 host:
   hostname: "k3s-agent00"

@@ -3,7 +3,7 @@ ansible_user: "{{ user }}"
 ansible_host: 192.168.20.26
 ansible_port: 22
 ansible_ssh_private_key_file: "{{ pk_path }}"
-ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
+ansible_become_pass: "{{ vault.k3s.agent01.sudo }}"
 
 host:
   hostname: "k3s-agent01"

@@ -3,7 +3,7 @@ ansible_user: "{{ user }}"
 ansible_host: 192.168.20.27
 ansible_port: 22
 ansible_ssh_private_key_file: "{{ pk_path }}"
-ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
+ansible_become_pass: "{{ vault.k3s.agent02.sudo }}"
 
 host:
   hostname: "k3s-agent02"

@@ -0,0 +1,10 @@
+---
+ansible_user: "{{ user }}"
+ansible_host: 192.168.20.32
+ansible_port: 22
+ansible_ssh_private_key_file: "{{ pk_path }}"
+ansible_become_pass: "{{ vault.k3s.longhorn00.sudo }}"
+
+host:
+  hostname: "k3s-longhorn00"
+  ip: "{{ ansible_host }}"

@@ -0,0 +1,10 @@
+---
+ansible_user: "{{ user }}"
+ansible_host: 192.168.20.33
+ansible_port: 22
+ansible_ssh_private_key_file: "{{ pk_path }}"
+ansible_become_pass: "{{ vault.k3s.longhorn01.sudo }}"
+
+host:
+  hostname: "k3s-longhorn01"
+  ip: "{{ ansible_host }}"

@@ -0,0 +1,10 @@
+---
+ansible_user: "{{ user }}"
+ansible_host: 192.168.20.31
+ansible_port: 22
+ansible_ssh_private_key_file: "{{ pk_path }}"
+ansible_become_pass: "{{ vault.k3s.longhorn02.sudo }}"
+
+host:
+  hostname: "k3s-longhorn02"
+  ip: "{{ ansible_host }}"

@@ -3,7 +3,7 @@ ansible_user: "{{ user }}"
 ansible_host: 192.168.20.30
 ansible_port: 22
 ansible_ssh_private_key_file: "{{ pk_path }}"
-ansible_become_pass: "{{ vault.k3s.server01.sudo }}"
+ansible_become_pass: "{{ vault.k3s.server02.sudo }}"
 
 host:
   hostname: "k3s-server02"

@@ -0,0 +1,31 @@
+- name: Set up storage
+  hosts: k3s_nodes
+  gather_facts: yes
+  vars_files:
+    - secrets.yml
+  pre_tasks:
+    - name: Get K3s token from the first server
+      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
+      slurp:
+        src: /var/lib/rancher/k3s/server/node-token
+      register: k3s_token
+      become: true
+
+    - name: Set fact on k3s.server.ips[0]
+      when: host.ip == k3s.server.ips[0] and inventory_hostname in groups["k3s_server"]
+      set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
+
+  roles:
+    - role: common
+      when: inventory_hostname in groups["k3s_storage"]
+      tags:
+        - common
+    - role: k3s_storage
+      when: inventory_hostname in groups["k3s_storage"]
+      k3s_token: "{{ hostvars[(hostvars | dict2items | map(attribute='value') | map('dict2items') | map('selectattr', 'key', 'match', 'host') | map('selectattr', 'value.ip', 'match', k3s.server.ips[0] ) | select() | first | items2dict).host.hostname].k3s_token }}"
+      tags:
+        - k3s_storage
+    - role: node_exporter
+      when: inventory_hostname in groups["k3s_storage"]
+      tags:
+        - node_exporter

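Note on the k3s_token value passed to the k3s_storage role above: the long hostvars expression only locates the inventory host whose host.ip equals k3s.server.ips[0] (the first K3s server) and reuses the k3s_token fact registered there in pre_tasks. A minimal, untested sketch of an equivalent lookup, assuming every host in the k3s_server group defines host.ip as in the host_vars files added in this commit, could be:

  # Illustrative sketch only, not part of the commit: pick the hostvars of the
  # k3s_server host whose host.ip matches the first server IP, then read the
  # k3s_token fact registered on it.
  k3s_token: >-
    {{ (groups['k3s_server']
        | map('extract', hostvars)
        | selectattr('host.ip', 'equalto', k3s.server.ips[0])
        | first).k3s_token }}
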
production
@@ -10,6 +10,9 @@ k3s-server02
 k3s-agent00
 k3s-agent01
 k3s-agent02
+k3s-longhorn00
+k3s-longhorn01
+k3s-longhorn02
 
 [k3s_server]
 k3s-server00
@@ -21,6 +24,11 @@ k3s-agent00
 k3s-agent01
 k3s-agent02
 
+[k3s_storage]
+k3s-longhorn00
+k3s-longhorn01
+k3s-longhorn02
+
 [vm]
 k3s-agent00
 k3s-agent01
@@ -30,6 +38,9 @@ k3s-server01
 k3s-server02
 k3s-postgres
 k3s-loadbalancer
+k3s-longhorn00
+k3s-longhorn01
+k3s-longhorn02
 
 [k3s_nodes]
 k3s-server00
@@ -38,6 +49,9 @@ k3s-server02
 k3s-agent00
 k3s-agent01
 k3s-agent02
+k3s-longhorn00
+k3s-longhorn01
+k3s-longhorn02
 
 [db]
 k3s-postgres

@@ -1,7 +1,7 @@
 export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
 case $- in
-*i*) ;;
-*) return;;
+*i*) ;;
+*) return ;;
 esac
 HISTCONTROL=ignoreboth
 shopt -s histappend
@@ -9,39 +9,38 @@ HISTSIZE=1000
 HISTFILESIZE=2000
 shopt -s checkwinsize
 if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
-debian_chroot=$(cat /etc/debian_chroot)
+debian_chroot=$(cat /etc/debian_chroot)
 fi
 case "$TERM" in
-xterm-color|*-256color) color_prompt=yes;;
+xterm-color | *-256color) color_prompt=yes ;;
 esac
 if [ -n "$force_color_prompt" ]; then
-if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
-color_prompt=yes
-else
-color_prompt=
-fi
+if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
+color_prompt=yes
+else
+color_prompt=
+fi
 fi
 if [ "$color_prompt" = yes ]; then
-PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
 else
-PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
+PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
 fi
 unset color_prompt force_color_prompt
 case "$TERM" in
-xterm*|rxvt*)
-PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
-;;
-*)
-;;
+xterm* | rxvt*)
+PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
+;;
+*) ;;
 esac
 
 if [ -x /usr/bin/dircolors ]; then
-test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
-alias ls='ls --color=auto'
+test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
+alias ls='ls --color=auto'
 fi
 
 if [ -f ~/.bash_aliases ]; then
-. ~/.bash_aliases
+. ~/.bash_aliases
 fi
 
 if ! shopt -oq posix; then

@@ -1,9 +1,9 @@
 ---
 - name: Copy .bashrc
-  template:
+  ansible.builtin.template:
     src: files/bash/bashrc
     dest: "/home/{{ user }}/.bashrc"
     owner: "{{ user }}"
     group: "{{ user }}"
-    mode: 0644
-  become: yes
+    mode: "644"
+  become: true

@@ -5,10 +5,10 @@
   become: true
 
 - name: Update /etc/hosts to reflect the new hostname
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: /etc/hosts
     regexp: '^127\.0\.1\.1'
     line: "127.0.1.1 {{ host.hostname }}"
     state: present
-    backup: yes
+    backup: true
   become: true

@@ -1,6 +1,11 @@
 ---
-- include_tasks: time.yml
-- include_tasks: hostname.yml
-- include_tasks: packages.yml
-- include_tasks: bash.yml
-- include_tasks: sshd.yml
+- name: Configure Time
+  ansible.builtin.include_tasks: time.yml
+- name: Configure Hostname
+  ansible.builtin.include_tasks: hostname.yml
+- name: Configure Packages
+  ansible.builtin.include_tasks: packages.yml
+- name: Configure Bash
+  ansible.builtin.include_tasks: bash.yml
+- name: Configure SSH
+  ansible.builtin.include_tasks: sshd.yml

@@ -1,13 +1,13 @@
 ---
 - name: Update and upgrade packages
-  apt:
-    update_cache: yes
-    upgrade: yes
-    autoremove: yes
-  become: yes
+  ansible.builtin.apt:
+    update_cache: true
+    upgrade: true
+    autoremove: true
+  become: true
 
 - name: Install extra packages
-  apt:
+  ansible.builtin.apt:
     name: "{{ common_packages }}"
     state: present
-  become: yes
+  become: true

@@ -1,15 +1,15 @@
 ---
 - name: Copy sshd_config
-  template:
+  ansible.builtin.template:
     src: templates/ssh/sshd_config
     dest: /etc/ssh/sshd_config
-    mode: 0644
+    mode: "644"
   notify:
     - Restart sshd
-  become: yes
+  become: true
 
 - name: Copy pubkey
-  copy:
+  ansible.builtin.copy:
     content: "{{ pubkey }}"
     dest: "/home/{{ user }}/.ssh/authorized_keys"
     owner: "{{ user }}"

@@ -1,124 +1,18 @@
-# $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
-
-# This is the sshd server system-wide configuration file. See
-# sshd_config(5) for more information.
-
-# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
-
-# The strategy used for options in the default sshd_config shipped with
-# OpenSSH is to specify options with their default value where
-# possible, but leave them commented. Uncommented options override the
-# default value.
-
-Include /etc/ssh/sshd_config.d/*.conf
-
-Protocol 2
-#Port 22
-#AddressFamily any
-#ListenAddress 0.0.0.0
-#ListenAddress ::
-
-#HostKey /etc/ssh/ssh_host_rsa_key
-#HostKey /etc/ssh/ssh_host_ecdsa_key
-#HostKey /etc/ssh/ssh_host_ed25519_key
-
-# Ciphers and keying
-#RekeyLimit default none
-
-# Logging
-#SyslogFacility AUTH
-#LogLevel INFO
-
-# Authentication:
-
-#LoginGraceTime 2m
-PermitRootLogin no
-#StrictModes yes
-MaxAuthTries 3
-#MaxSessions 10
-
-PubkeyAuthentication yes
-
-# Expect .ssh/authorized_keys2 to be disregarded by default in future.
-#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
-
-#AuthorizedPrincipalsFile none
-
-#AuthorizedKeysCommand none
-#AuthorizedKeysCommandUser nobody
-
-# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
-#HostbasedAuthentication no
-# Change to yes if you don't trust ~/.ssh/known_hosts for
-# HostbasedAuthentication
-#IgnoreUserKnownHosts no
-# Don't read the user's ~/.rhosts and ~/.shosts files
-#IgnoreRhosts yes
-
-# To disable tunneled clear text passwords, change to no here!
-PasswordAuthentication no
-PermitEmptyPasswords no
-
-# Change to yes to enable challenge-response passwords (beware issues with
-# some PAM modules and threads)
-ChallengeResponseAuthentication no
-
-# Kerberos options
-#KerberosAuthentication no
-#KerberosOrLocalPasswd yes
-#KerberosTicketCleanup yes
-#KerberosGetAFSToken no
-
-# GSSAPI options
-#GSSAPIAuthentication no
-#GSSAPICleanupCredentials yes
-#GSSAPIStrictAcceptorCheck yes
-#GSSAPIKeyExchange no
-
-# Set this to 'yes' to enable PAM authentication, account processing,
-# and session processing. If this is enabled, PAM authentication will
-# be allowed through the ChallengeResponseAuthentication and
-# PasswordAuthentication. Depending on your PAM configuration,
-# PAM authentication via ChallengeResponseAuthentication may bypass
-# the setting of "PermitRootLogin without-password".
-# If you just want the PAM account and session checks to run without
-# PAM authentication, then enable this but set PasswordAuthentication
-# and ChallengeResponseAuthentication to 'no'.
-UsePAM yes
-
-AllowAgentForwarding no
-AllowTcpForwarding no
-#GatewayPorts no
-X11Forwarding no
-#X11DisplayOffset 10
-#X11UseLocalhost yes
-#PermitTTY yes
-PrintMotd no
-#PrintLastLog yes
-TCPKeepAlive no
-#PermitUserEnvironment no
-#Compression delayed
-#ClientAliveInterval 0
-ClientAliveCountMax 2
-UseDNS yes
-#PidFile /var/run/sshd.pid
-#MaxStartups 10:30:100
-#PermitTunnel no
-#ChrootDirectory none
-#VersionAddendum none
-
-# no default banner path
-#Banner none
-
-# Allow client to pass locale environment variables
-AcceptEnv LANG LC_*
-
-# override default of no subsystems
-Subsystem sftp /usr/lib/openssh/sftp-server
-
-# Example of overriding settings on a per-user basis
-#Match User anoncvs
-# X11Forwarding no
-# AllowTcpForwarding no
-# PermitTTY no
-# ForceCommand cvs server

@@ -1,6 +1,6 @@
 ---
 - name: See if k3s file exists
-  stat:
+  ansible.builtin.stat:
    path: /usr/local/bin/k3s
   register: k3s_status
 
@@ -13,7 +13,7 @@
 
 - name: Install K3s server with node taint and TLS SAN
   when: (host.ip == k3s.server.ips[0] and (not k3s_status.stat.exists))
-  command: |
+  ansible.builtin.command: |
     /tmp/k3s_install.sh server \
     --node-taint CriticalAddonsOnly=true:NoExecute \
     --tls-san {{ k3s.loadbalancer.ip }}
@@ -43,11 +43,11 @@
 
 - name: Set fact on k3s.server.ips[0]
   when: host.ip == k3s.server.ips[0]
-  set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
+  ansible.builtin.set_fact: k3s_token="{{ k3s_token['content'] | b64decode | trim }}"
 
 - name: Install K3s on the secondary servers
   when: (host.ip != k3s.server.ips[0] and (not k3s_status.stat.exists))
-  command: |
+  ansible.builtin.command: |
     /tmp/k3s_install.sh server \
     --node-taint CriticalAddonsOnly=true:NoExecute \
     --tls-san {{ k3s.loadbalancer.ip }}

@@ -0,0 +1,6 @@
+---
+- name: Restart k3s
+  service:
+    name: k3s
+    state: restarted
+  become: yes

@@ -0,0 +1,22 @@
+---
+- name: See if k3s file exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/k3s
+  register: k3s_status
+
+- name: Download K3s install script to /tmp/
+  when: not k3s_status.stat.exists
+  ansible.builtin.get_url:
+    url: https://get.k3s.io
+    dest: /tmp/k3s_install.sh
+    mode: "0755"
+
+- name: Install K3s on the secondary servers with longhorn affinity
+  when: not k3s_status.stat.exists
+  ansible.builtin.command: |
+    /tmp/k3s_install.sh \
+    --node-label longhorn=true
+  environment:
+    K3S_URL: "https://{{ k3s.loadbalancer.ip }}:{{ k3s.loadbalancer.default_port }}"
+    K3S_TOKEN: "{{ k3s_token }}"
+  become: true

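The --node-label longhorn=true flag above only attaches a label to the new storage nodes; nothing in this commit consumes it yet. A hypothetical, generic Kubernetes manifest (not part of this repository) showing how such a node label is typically consumed through a nodeSelector:

  # Illustrative only: pin a workload to the nodes labelled longhorn=true
  # (kubectl get nodes -l longhorn=true would list k3s-longhorn00..02).
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: storage-workload        # hypothetical name
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: storage-workload
    template:
      metadata:
        labels:
          app: storage-workload
      spec:
        nodeSelector:
          longhorn: "true"        # matches --node-label longhorn=true
        containers:
          - name: app
            image: busybox        # placeholder image
            command: ["sleep", "3600"]
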
@@ -0,0 +1,2 @@
+---
+- include_tasks: installation.yml

@@ -1,3 +1,3 @@
 #!/bin/bash
 
-ansible-vault view secrets.yml | sed "s/: \w\+$/: ......../g" >>secrets.yml.skeleton
+ansible-vault view secrets.yml | sed "s/: [a-zA-Z0-9!\"]\+/: ......../" >secrets.yml.skeleton

@@ -1,4 +1,20 @@
 vault:
   k3s:
-    server:
+    server00:
+      sudo: ........
+    server01:
+      sudo: ........
+    server02:
+      sudo: ........
+    agent00:
+      sudo: ........
+    agent01:
+      sudo: ........
+    agent02:
+      sudo: ........
+    loadbalancer:
+      sudo: ........
+    postgres:
+      sudo: ........
+      db:
+        password: ........