Annihilate ansible
parent: b63ef133c2
commit: 636ac2e336
137 changed files with 169 additions and 3212 deletions
.gitignore (vendored): 1 change
@@ -1,3 +1,2 @@
secrets.yml
pysecrets.yml
*.rendered
@@ -1,27 +0,0 @@
#!/bin/bash

script_relative_path="$(dirname "$0")"
declare -i playbook_number=1
mapfile -t playbook_list < <(find "$script_relative_path/playbooks" -type f | grep -oP '[^/]+\.yml$')


echo "Playbook list:"
for playbook in "${playbook_list[@]}"; do
  echo " [$playbook_number] $playbook"
  playbook_number+=1
done

read -rp "Select playbook number to execute: " selected_playbook_number


selected_playbook_name="${playbook_list[((selected_playbook_number - 1))]}"

if ! echo "$selected_playbook_number" | grep -qP '^[1-9][0-9]*$' || [[ -z "$selected_playbook_name" ]]; then
  echo "Invalid playbook number entered."
  exit 1
fi


echo "Selected playbook: $selected_playbook_name"

ansible-playbook "$script_relative_path/playbooks/$selected_playbook_name"
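A session with this deleted helper would have looked roughly like the following (the script's filename is not shown by the diff viewer, so the invocation is hypothetical; the playbook names are taken from the playbooks removed further down):

    $ ./run-playbook.sh
    Playbook list:
     [1] backup.yml
     [2] setup.yml
    Select playbook number to execute: 2
    Selected playbook: setup.yml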
@@ -1,3 +0,0 @@
[defaults]
roles_path = ./roles
ask_vault_pass = True
env.yml: 211 changes
@@ -1,79 +1,99 @@
domain: viyurz.fr
timezone: "Europe/Paris"
host_uid: 1000
project_dir: "{{ ansible_env['HOME'] }}/docker-projects/{{ role_name }}"
docker_host: "unix:///run/user/{{ host_uid }}/docker.sock"

# UID shift for mapping between host & containers
uid_shift: 99999
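# Worked example: with uid_shift 99999, container UID 373 (etebase, see the
# users map below) owns its files on the host as 373 + 99999 = 100372,
# assuming a /etc/subuid range that starts at 100000.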
<%!
import os, subprocess

uid = os.getuid()
rootless = os.path.exists(f"/run/user/{uid}/podman/podman.sock")
%>
% if rootless:
rootless: true
podman_uid: ${uid}
uid_shift: ${int(subprocess.run(['sh', '-c', "grep " + os.getlogin() + " /etc/subuid | cut -d ':' -f 2"], capture_output=True, text=True).stdout.strip()) - 1}
socket: "/run/user/${uid}/podman/podman.sock"
% else:
rootless: false
podman_uid: 0
uid_shift: 0
socket: "/run/podman/podman.sock"
% endif
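Because of the block above, env.yml is a Mako template rather than plain YAML. A minimal sketch of rendering and loading it, assuming the mako and PyYAML packages (the repository's actual loader is not shown in this hunk):

    from mako.template import Template
    import yaml

    # Render the <%! ... %> module block and the % if / % else directives
    # down to plain YAML, then parse the result.
    rendered = Template(filename="env.yml").render()
    env = yaml.safe_load(rendered)
    print(env["rootless"], env["uid_shift"], env["socket"])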


# cifs_credentials is undefined when we run the backup playbook
# as a cronjob, so set empty default value to prevent errors,
# which is fine because we don't use it.
cifs_host: "{{ cifs_credentials['username'] | default('') }}.your-storagebox.de"
backup:
  etebase:
    - /mnt/etebasedata/media
  hedgedoc:
    - /mnt/hedgedocuploads
  mailserver:
    - /mnt/mailserver/etc/config.toml
  synapse:
    - /mnt/synapsedata
  vaultwarden:
    - /mnt/vwdata/attachments

cifs_mounts:
  backups:
    src: "//{{ cifs_host }}/backup/backups"
    path: /mnt/storagebox/backups
    uid: 0
    gid: "{{ host_uid }}"
    file_mode: 640
    dir_mode: 750
backup_sqlite:
  stump: /mnt/stump/config/stump.db
  uptime: /mnt/uptimekumadata/kuma.db

borg_repo: /mnt/storagebox/backups/borg2
borg_prune_opts:
  - "--keep-within=1d"
  - "--keep-daily=7"
  - "--keep-weekly=4"
  - "--keep-monthly=12"
  - "--keep-yearly=86"
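# These options render into a command along the lines of (a sketch, using
# the borg_repo path above):
#   borg prune --keep-within=1d --keep-daily=7 --keep-weekly=4 \
#     --keep-monthly=12 --keep-yearly=86 /mnt/storagebox/backups/borg2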


certs:
  coturn:
    cert: "/etc/letsencrypt/live/turn.viyurz.fr/fullchain.pem"
    pkey: "/etc/letsencrypt/live/turn.viyurz.fr/privkey.pem"
  mailserver:
    cert: "/etc/letsencrypt/live/mail.viyurz.fr/fullchain.pem"
    pkey: "/etc/letsencrypt/live/mail.viyurz.fr/privkey.pem"


pasta:
  coturn:
    ipv4: 10.86.3.1
    ipv6: fc86::3
  etebase:
    ipv4: 10.86.5.1
    ipv6: fc86::5
  fireshare:
    src: "//{{ cifs_host }}/backup/fireshare"
    path: /mnt/storagebox/fireshare
    uid: "{{ users['fireshare'] + uid_shift }}"
    gid: "{{ users['fireshare'] + uid_shift }}"
    file_mode: 644
    dir_mode: 755
  storagebox:
    src: "//{{ cifs_host }}/backup"
    path: /mnt/storagebox
    uid: 0
    gid: 0
    file_mode: 640
    dir_mode: 751
    ipv4: 10.86.6.1
    ipv6: fc86::6
  hedgedoc:
    ipv4: 10.86.8.1
    ipv6: fc86::8
  keycloak:
    ipv4: 10.86.11.1
    ipv6: fc86::11
  mailserver:
    ipv4: 10.86.13.1
    ipv6: fc86::13
  postgres:
    ipv4: 10.86.16.1
    ipv6: fc86::16
  stump:
    ipv4: 10.86.18.1
    ipv6: fc86::18
  synapse:
    ipv4: 10.86.19.1
    ipv6: fc86::19
  syncthing:
    src: "//{{ cifs_host }}/backup/syncthing"
    path: /mnt/storagebox/syncthing
    uid: "{{ users['syncthing'] + uid_shift }}"
    gid: "{{ users['syncthing'] + uid_shift }}"
    file_mode: 640
    dir_mode: 750


projects:
  - coturn
  - diun
  - etebase
  - fireshare
  - hedgedoc
  - homepage
  - keycloak
  - mailserver
  - postgres
  - searxng
  - stump
  - synapse
  - syncthing
  - uptime-kuma
  - vaultwarden


projects_to_backup:
  - keycloak


borg_repodir: "{{ cifs_mounts['backups']['path'] }}/borg"
borg_passphrase_file: /etc/borg-passphrase.txt
borg_prune_options: |
  --keep-within=1d
  --keep-daily=7
  --keep-weekly=4
  --keep-monthly=12
  --keep-yearly=10
  ipv4: 10.86.20.1
  ipv6: fc86::20
syncthing_relaysrv:
  ipv4: 10.86.21.1
  ipv6: fc86::21
uptime:
  ipv4: 10.86.22.1
  ipv6: fc86::22
vaultwarden:
  ipv4: 10.86.23.1
  ipv6: fc86::23


# Ports exposed to host
@@ -96,18 +116,18 @@ ports:
  stump: 10801
  synapse: 8008
  syncthing_discosrv: 8443
  # Public port, forwarded to 22067 by nftables
  syncthing_relaysrv: 143
  syncthing_relaysrv: 143 # Public port, forwarded to 22067 by nftables
  syncthing_webui: 8384
  syncthing_tcp: 18880
  syncthing_tcp: 9100
  syncthing_udp: 22000
  uptime_kuma: 3001
  uptime: 3001
  vaultwarden: 8081


# UID in containers
users:
  coturn: 666
  diun: 0
  etebase: 373
  fireshare: 1007
  hedgedoc: 1004
@@ -122,30 +142,31 @@ users:
  syncthing: 1001
  syncthing_discosrv: 1002
  syncthing_relaysrv: 1003
  uptime_kuma: 1006
  uptime: 1006
  vaultwarden: 1010


volumes:
  coturn_tls_certificate_file: "/etc/letsencrypt/live/turn.{{ domain }}/fullchain.pem"
  coturn_tls_certificate_key_file: "/etc/letsencrypt/live/turn.{{ domain }}/privkey.pem"
  etebase_datadir: /mnt/etebasedata
  fireshare_datadir: /mnt/firesharedata
  fireshare_processeddir: /mnt/storagebox/fireshare/processed
  fireshare_videosdir: /mnt/storagebox/fireshare/videos
  hedgedoc_uploadsdir: /mnt/hedgedocuploads
  mailserver_datadir: /mnt/mailserver
  mailserver_tls_certificate_file: "/etc/letsencrypt/live/mail.{{ domain }}/fullchain.pem"
  mailserver_tls_certificate_key_file: "/etc/letsencrypt/live/mail.{{ domain }}/privkey.pem"
  postgres_datadir: /mnt/postgresdata
  stump_configdir: /mnt/stump/config
  stump_datadir: /mnt/stump/data
  synapse_datadir: /mnt/synapsedata
  syncthing_datadir: "{{ cifs_mounts['syncthing']['path'] }}"
  uptime_kuma_datadir: /mnt/uptimekumadata
  vaultwarden_datadir: /mnt/vwdata


# Service-specific variables
synapse:
  max_upload_size: 50M
etebase:
  datadir: /mnt/etebasedata
fireshare:
  datadir: /mnt/firesharedata
  processeddir: /mnt/storagebox/fireshare/processed
  videosdir: /mnt/storagebox/fireshare/videos
hedgedoc:
  uploadsdir: /mnt/hedgedocuploads
mailserver:
  datadir: /mnt/mailserver
postgres:
  datadir: /mnt/postgresdata
stump:
  configdir: /mnt/stump/config
  datadir: /mnt/stump/data
synapse:
  datadir: /mnt/synapsedata
syncthing:
  datadir: /mnt/storagebox/syncthing
uptime:
  datadir: /mnt/uptimekumadata
vaultwarden:
  datadir: /mnt/vwdata
@@ -361,8 +361,8 @@ def main():


def main():
    envFile = "pyenv.yml"
    secretsFile = "pysecrets.yml"
    envFile = "env.yml"
    secretsFile = "secrets.yml"

    os.chdir(os.path.realpath(sys.path[0]))
@@ -1,31 +0,0 @@
- name: Include variables files & run borg-init role
  hosts: localhost
  roles:
    - include-vars
    - borg-init
    - backup-secrets

- name: Backup project(s)
  hosts: localhost
  vars:
    run_backup: true
  vars_prompt:
    - name: selected_projects
      prompt: "Choose projects to backup (leave empty to backup all. Projects list: {{ hostvars['localhost']['projects_to_backup'] }})"
      private: false
      unsafe: true

  tasks:
    - name: Backup project(s)
      include_role:
        name: "{{ project }}"
      loop: "{{ (selected_projects | split) | default(projects_to_backup, true) }}"
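      # An empty answer makes `selected_projects | split` evaluate to an
      # empty (falsy) list, so default(projects_to_backup, true) falls back
      # to the full list; a reading of the filter chain, not stated in this diff.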
      loop_control:
        # Do not use default variable name 'item' to prevent collisions with loops in roles.
        loop_var: project
      when: project in projects_to_backup

- name: Compact borg repository
  hosts: localhost
  roles:
    - borg-compact
@@ -1,10 +0,0 @@
# Playbook to mount CIFS mounts defined in env.yml
- name: Include variables files
  hosts: localhost
  roles:
    - include-vars

- name: Edit fstab configuration & mount CIFS devices
  hosts: localhost
  roles:
    - fstab
@@ -1,5 +0,0 @@
- name: Setup Docker rootless
  hosts: localhost
  roles:
    - include-vars
    - dockerd
@@ -1,5 +0,0 @@
- name: Include variables files & setup NGINX
  hosts: localhost
  roles:
    - include-vars
    - nginx
@@ -1,31 +0,0 @@
- name: Include variables files
  hosts: localhost
  roles:
    - include-vars

- name: Setup & update project(s)
  hosts: localhost
  vars:
    run_backup: true
    run_setup: true
    run_update: true
  vars_prompt:
    - name: selected_projects
      prompt: "Choose projects to setup & update (Keep empty to select all. Projects list: {{ hostvars['localhost']['projects'] }})"
      private: false
      unsafe: true

    - name: docker_pull_images
      prompt: "Pull project(s) images?"
      default: false
      private: false

  tasks:
    - name: Setup & update project(s)
      include_role:
        name: "{{ project }}"
      loop: "{{ (selected_projects | split) | default(projects, true) }}"
      loop_control:
        # Do not use default variable name 'item' to prevent collisions with loops in roles.
        loop_var: project
      when: project in projects
@@ -1,5 +0,0 @@
- name: Include variables files & load nftables.conf
  hosts: localhost
  roles:
    - include-vars
    - nftables
@@ -1,25 +0,0 @@
- name: Include variables files
  hosts: localhost
  roles:
    - include-vars

- name: Update project(s)
  hosts: localhost
  vars:
    docker_pull_images: true
    run_update: true
  vars_prompt:
    - name: selected_projects
      prompt: "Choose projects to update (Keep empty to update all. Projects list: {{ hostvars['localhost']['projects'] }})"
      private: false
      unsafe: true

  tasks:
    - name: Update project(s)
      include_role:
        name: "{{ project }}"
      loop: "{{ (selected_projects | split) | default(projects, true) }}"
      loop_control:
        # Do not use default variable name 'item' to prevent collisions with loops in roles.
        loop_var: project
      when: project in projects
pyenv.yml: 172 changes
@@ -1,172 +0,0 @@
domain: viyurz.fr
timezone: "Europe/Paris"

<%!
import os, subprocess

uid = os.getuid()
rootless = os.path.exists(f"/run/user/{uid}/podman/podman.sock")
%>
% if rootless:
rootless: true
podman_uid: ${uid}
uid_shift: ${int(subprocess.run(['sh', '-c', "grep " + os.getlogin() + " /etc/subuid | cut -d ':' -f 2"], capture_output=True, text=True).stdout.strip()) - 1}
socket: "/run/user/${uid}/podman/podman.sock"
% else:
rootless: false
podman_uid: 0
uid_shift: 0
socket: "/run/podman/podman.sock"
% endif


backup:
  etebase:
    - /mnt/etebasedata/media
  hedgedoc:
    - /mnt/hedgedocuploads
  mailserver:
    - /mnt/mailserver/etc/config.toml
  synapse:
    - /mnt/synapsedata
  vaultwarden:
    - /mnt/vwdata/attachments

backup_sqlite:
  stump: /mnt/stump/config/stump.db
  uptime: /mnt/uptimekumadata/kuma.db

borg_repo: /mnt/storagebox/backups/borg2
borg_prune_opts:
  - "--keep-within=1d"
  - "--keep-daily=7"
  - "--keep-weekly=4"
  - "--keep-monthly=12"
  - "--keep-yearly=86"


certs:
  coturn:
    cert: "/etc/letsencrypt/live/turn.viyurz.fr/fullchain.pem"
    pkey: "/etc/letsencrypt/live/turn.viyurz.fr/privkey.pem"
  mailserver:
    cert: "/etc/letsencrypt/live/mail.viyurz.fr/fullchain.pem"
    pkey: "/etc/letsencrypt/live/mail.viyurz.fr/privkey.pem"


pasta:
  coturn:
    ipv4: 10.86.3.1
    ipv6: fc86::3
  etebase:
    ipv4: 10.86.5.1
    ipv6: fc86::5
  fireshare:
    ipv4: 10.86.6.1
    ipv6: fc86::6
  hedgedoc:
    ipv4: 10.86.8.1
    ipv6: fc86::8
  keycloak:
    ipv4: 10.86.11.1
    ipv6: fc86::11
  mailserver:
    ipv4: 10.86.13.1
    ipv6: fc86::13
  postgres:
    ipv4: 10.86.16.1
    ipv6: fc86::16
  stump:
    ipv4: 10.86.18.1
    ipv6: fc86::18
  synapse:
    ipv4: 10.86.19.1
    ipv6: fc86::19
  syncthing:
    ipv4: 10.86.20.1
    ipv6: fc86::20
  syncthing_relaysrv:
    ipv4: 10.86.21.1
    ipv6: fc86::21
  uptime:
    ipv4: 10.86.22.1
    ipv6: fc86::22
  vaultwarden:
    ipv4: 10.86.23.1
    ipv6: fc86::23


# Ports exposed to host
ports:
  coturn_listening: 3478
  coturn_tls_listening: 5349
  coturn_relay_min: 49152
  coturn_relay_max: 49172
  etebase: 3735
  fireshare: 8085
  hedgedoc: 8086
  homepage: 8686
  keycloak: 8444
  mailserver_smtp: 1025
  mailserver_smtps: 1465
  mailserver_imaps: 1993
  mailserver_https: 1443
  postgres: 5432
  searxng: 8083
  stump: 10801
  synapse: 8008
  syncthing_discosrv: 8443
  syncthing_relaysrv: 143 # Public port, forwarded to 22067 by nftables
  syncthing_webui: 8384
  syncthing_tcp: 9100
  syncthing_udp: 22000
  uptime: 3001
  vaultwarden: 8081


# UID in containers
users:
  coturn: 666
  diun: 0
  etebase: 373
  fireshare: 1007
  hedgedoc: 1004
  homepage: 8686
  keycloak: 1000
  mailserver: 8
  postgres: 70
  searxng: 977
  searxng_valkey: 999
  stump: 1005
  synapse: 991
  syncthing: 1001
  syncthing_discosrv: 1002
  syncthing_relaysrv: 1003
  uptime: 1006
  vaultwarden: 1010


volumes:
  etebase:
    datadir: /mnt/etebasedata
  fireshare:
    datadir: /mnt/firesharedata
    processeddir: /mnt/storagebox/fireshare/processed
    videosdir: /mnt/storagebox/fireshare/videos
  hedgedoc:
    uploadsdir: /mnt/hedgedocuploads
  mailserver:
    datadir: /mnt/mailserver
  postgres:
    datadir: /mnt/postgresdata
  stump:
    configdir: /mnt/stump/config
    datadir: /mnt/stump/data
  synapse:
    datadir: /mnt/synapsedata
  syncthing:
    datadir: /mnt/storagebox/syncthing
  uptime:
    datadir: /mnt/uptimekumadata
  vaultwarden:
    datadir: /mnt/vwdata
@@ -1,60 +0,0 @@
# To generate a random secret: openssl rand -base64 <length>
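#   e.g. openssl rand -base64 32   (32 random bytes, base64-encoded)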

borg:


diun_webhookurl:

fireshare:
  admin_user:
  admin_pass:
  key:

hedgedoc_session:

keycloak:
  hedgedoc:
    id:
    secret:
  synapse:
    id:
    secret:

mailserver:
  synapse:
    user:
    pass:
  vaultwarden:
    user:
    pass:

postgres:
  # https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters
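  # e.g. an illustrative password p@ss:word would have to be written as
  # p%40ss%3Aword inside a postgres:// connection URL.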
  etebase:
    user:
    pass: # No '%' character allowed
  keycloak:
    user:
    pass:
  hedgedoc:
    user:
    pass:
  mailserver:
    user:
    pass:
  synapse:
    user:
    pass:
  vaultwarden:
    user:
    pass:

searxng:

synapse:
  macaroon:
  form:

turn_static_auth:

vw_admin_token_hash:
@@ -1,22 +0,0 @@
- name:
  become: true
  block:
    - name: Create borg backup
      command:
        cmd: |
          borg create
          --compression=lzma
          "{{ borg_repodir }}::secrets-{now:%Y-%m-%d_%H-%M-%S}"
          {{ playbook_dir }}/../secrets.yml
      environment:
        BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"

    - name: Prune borg repository
      command:
        cmd: |
          borg prune
          --glob-archives='secrets-*'
          {{ borg_prune_options }}
          {{ borg_repodir }}
      environment:
        BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
@@ -1,4 +0,0 @@
- name: Compact borg repository
  command:
    cmd: "borg compact {{ borg_repodir }}"
  become: true
@@ -1,35 +0,0 @@
- name:
  become: true
  block:
    - name: Install packages borgbackup & sqlite3
      apt:
        name:
          - borgbackup
          # SQLite required for Vaultwarden
          - sqlite3

    - name: Get borg passphrase file stat
      stat:
        path: "{{ borg_passphrase_file }}"
      register: borg_stat_passphrase_file_result

    - name: "Template borg-passphrase.txt to {{ borg_passphrase_file }}"
      template:
        src: borg-passphrase.txt
        dest: "{{ borg_passphrase_file }}"
        owner: root
        group: root
        mode: '600'
      when: not borg_stat_passphrase_file_result.stat.exists or borg_update_passphrase | default(false) | bool

    - name: Get borg repository stat
      stat:
        path: "{{ borg_repodir }}"
      register: borg_stat_repodir_result

    - name: Create borg repository
      command:
        cmd: "borg init --encryption repokey {{ borg_repodir }}"
      environment:
        BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
      when: not borg_stat_repodir_result.stat.exists
@@ -1 +0,0 @@
{{ borg_passphrase }}
@@ -1,9 +0,0 @@
- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,58 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & turnserver.conf to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '640'
  loop:
    - docker-compose.yaml
    - turnserver.conf

# Separate task because template module cannot chown/chgrp to a non-existing user/group
- name: "Change group of turnserver.conf to coturn GID ({{ users['coturn'] + uid_shift }})"
  file:
    path: "{{ project_dir }}/turnserver.conf"
    group: "{{ users['coturn'] + uid_shift }}"
  become: true

- name: Set limited permissions on certificate directories
  file:
    path: "/etc/{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '751'
  become: true
  loop:
    - letsencrypt
    - letsencrypt/live
    - letsencrypt/archive

- name: Set limited permissions on certificate directories
  file:
    path: "/etc/letsencrypt/{{ item }}/turn.{{ domain }}"
    state: directory
    owner: "{{ host_uid }}"
    group: "{{ users['coturn'] + uid_shift }}"
    mode: '550'
  become: true
  loop:
    - live
    - archive

- name: Set limited permissions on certificate key file
  file:
    path: "/etc/letsencrypt/live/turn.{{ domain }}/privkey.pem"
    owner: root
    group: "{{ users['coturn'] + uid_shift }}"
    mode: '640'
  become: true
@@ -1,18 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: coturn_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    coturn_pulled_images: "{{ coturn_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ coturn_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,18 +0,0 @@
services:
  coturn:
    container_name: coturn
    image: docker.io/coturn/coturn:4-alpine
    restart: always
    user: {{ users['coturn'] }}:{{ users['coturn'] }}
    ports:
      - {{ ports['coturn_listening'] }}:{{ ports['coturn_listening'] }}
      - {{ ports['coturn_listening'] }}:{{ ports['coturn_listening'] }}/udp
      - {{ ports['coturn_tls_listening'] }}:{{ ports['coturn_tls_listening'] }}
      - {{ ports['coturn_tls_listening'] }}:{{ ports['coturn_tls_listening'] }}/udp
      - {{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }}:{{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }}/udp
    tmpfs:
      - /var/lib/coturn
    volumes:
      - ./turnserver.conf:/etc/coturn/turnserver.conf
      - {{ volumes['coturn_tls_certificate_file'] }}:/etc/coturn/cert.pem
      - {{ volumes['coturn_tls_certificate_key_file'] }}:/etc/coturn/pkey.pem
@@ -1,68 +0,0 @@
listening-port={{ ports['coturn_listening'] }}
tls-listening-port={{ ports['coturn_tls_listening'] }}

# Lower and upper bounds of the UDP relay endpoints:
# (default values are 49152 and 65535)
min-port={{ ports['coturn_relay_min'] }}
max-port={{ ports['coturn_relay_max'] }}

#verbose
fingerprint

# Credentials in secrets.conf (static-auth-secret)
use-auth-secret
static-auth-secret={{ coturn_secrets['static_auth_secret'] }}

realm=turn.{{ domain }}

# TLS certificates, including intermediate certs.
# For Let's Encrypt certificates, use `fullchain.pem` here.
cert=/etc/coturn/cert.pem

# TLS private key file
pkey=/etc/coturn/pkey.pem

# Do not allow an TLS/DTLS version of protocol
no-tlsv1
no-tlsv1_1

# Disable RFC5780 (NAT behavior discovery).
no-rfc5780
no-stun-backward-compatibility
response-origin-only-with-rfc5780
no-cli

# VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
no-tcp-relay

# consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
total-quota=1200

# don't let the relay ever try to connect to private IP address ranges within your network (if any)
# given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
# recommended additional local peers to block, to mitigate external access to internal services.
# https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255
denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
@@ -1,9 +0,0 @@
- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,19 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml, .env & images.yml to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '640'
  loop:
    - docker-compose.yaml
    - .env
    - images.yml
@@ -1,24 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: diun_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    diun_pulled_images: "{{ diun_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ diun_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and diun_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,6 +0,0 @@
TZ={{ timezone }}
DIUN_WATCH_SCHEDULE='0 */6 * * *'
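# Cron reading: minute 0 of every 6th hour, i.e. 00:00, 06:00, 12:00 and 18:00.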
DIUN_PROVIDERS_DOCKER=true
DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true
DIUN_PROVIDERS_FILE_FILENAME=/etc/diun/images.yml
DIUN_NOTIF_DISCORD_WEBHOOKURL='{{ diun_secrets["webhookurl"] }}'
@@ -1,14 +0,0 @@
services:
  diun:
    image: docker.io/crazymax/diun:4
    container_name: diun
    command: serve
    restart: always
    env_file: .env
    volumes:
      - {{ docker_host | regex_replace('^unix://', '') }}:/var/run/docker.sock:ro
      - ./images.yml:/etc/diun/images.yml:ro
      - data:/data

volumes:
  data:
@@ -1,27 +0,0 @@
- name: quay.io/hedgedoc/hedgedoc
  watch_repo: true
  sort_tags: semver
  max_tags: 1
  include_tags:
    - ^[\d\.]+-alpine$

- name: quay.io/keycloak/keycloak
  watch_repo: true
  sort_tags: semver
  max_tags: 1
  include_tags:
    - ^\d+\.\d+$

- name: docker.io/stalwartlabs/mail-server
  watch_repo: true
  sort_tags: semver
  max_tags: 1
  include_tags:
    - ^v

- name: docker.io/aaronleopold/stump
  watch_repo: true
  sort_tags: semver
  max_tags: 1
  include_tags:
    - ^\d
@@ -1,3 +0,0 @@
[Service]
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns"
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns"
@@ -1,81 +0,0 @@
- name: Make sure required packages are installed
  apt:
    name:
      - dbus-user-session
      - docker.io
      - docker-compose
      - rootlesskit
      - slirp4netns
      - uidmap
  become: true

- name: Make sure system-wide Docker daemon & socket are stopped & disabled
  service:
    name: "{{ item }}"
    state: stopped
    enabled: false
  loop:
    - docker
    - docker.socket
  become: true

- name: Get docker user service status
  stat:
    path: "{{ ansible_env['HOME'] }}/.config/systemd/user/docker.service"
  register: dockerd_user_service_file_result

- name: Run dockerd-rootless-setuptool.sh script
  command:
    cmd: /usr/share/docker.io/contrib/dockerd-rootless-setuptool.sh install
  # Don't run install script everytime
  when: not dockerd_user_service_file_result.stat.exists

- name: Make sure /usr/share/docker.io/contrib is in PATH variable
  lineinfile:
    path: "{{ ansible_env['HOME'] }}/.profile"
    regex: '^export PATH="/usr/share/docker\.io/contrib'
    line: 'export PATH="/usr/share/docker.io/contrib:$PATH"'

- name: Make sure DOCKER_HOST variable is set correctly
  lineinfile:
    path: "{{ ansible_env['HOME'] }}/.profile"
    regex: '^export DOCKER_HOST='
    line: "export DOCKER_HOST={{ docker_host }}"
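    # With host_uid 1000 and the docker_host value from env.yml, this renders to:
    #   export DOCKER_HOST=unix:///run/user/1000/docker.sock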

- name: "Make sure lingering is enabled for user {{ host_uid }}"
  command:
    cmd: "loginctl enable-linger {{ host_uid }}"
  become: true

- name: "Create directory {{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d"
  file:
    path: "{{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d"
    state: directory

# Set port driver to slirp4netns to enable source IP propagation, which is required for coturn to work.
- name: "Copy systemd service override.conf to {{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d/override.conf"
  copy:
    src: "{{ role_path }}/files/override.conf"
    dest: "{{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d/override.conf"
  register: dockerd_copy_override_conf_result

- name: Edit some sysctl entries for Redis & Syncthing
  sysctl:
    name: "{{ item.key }}"
    value: "{{ item.value }}"
  loop:
    - key: vm.overcommit_memory
      value: 1
    - key: net.core.wmem_max
      value: 2500000
    - key: net.core.rmem_max
      value: 2500000
  become: true

- name: Start/restart & enable Docker user service
  service:
    name: docker
    scope: user
    # Restart only if config file(s) changed
    state: "{{ (dockerd_copy_override_conf_result.changed) | ternary('restarted', 'started') }}"
    enabled: true
@@ -1,30 +0,0 @@
- name:
  become: true
  block:
    - name: Backup SQLite database
      command:
        cmd: |
          sqlite3
          "{{ volumes['etebase_datadir'] }}/db.sqlite3"
          ".backup {{ volumes['etebase_datadir'] }}/db-backup.sqlite3"

    - name: Create borg backup
      command:
        cmd: |
          borg create
          --compression=lzma
          "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
          {{ volumes['etebase_datadir'] }}/db-backup.sqlite3
          {{ volumes['etebase_datadir'] }}/media
      environment:
        BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"

    - name: Prune borg repository
      command:
        cmd: |
          borg prune
          --glob-archives='{{ role_name }}-*'
          {{ borg_prune_options }}
          {{ borg_repodir }}
      environment:
        BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
@@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,27 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & etebase-server.ini to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '644'
  loop:
    - docker-compose.yaml
    - etebase-server.ini

- name: "Create (if not exists) directory {{ volumes['etebase_datadir'] }} & set permissions"
  file:
    path: "{{ volumes['etebase_datadir'] }}"
    state: directory
    owner: "{{ users['etebase'] + uid_shift }}"
    group: "{{ users['etebase'] + uid_shift }}"
    mode: '770'
  become: true
@@ -1,24 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: etebase_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    etebase_pulled_images: "{{ etebase_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ etebase_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and etebase_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,14 +0,0 @@
services:
  etebase:
    image: docker.io/victorrds/etebase:alpine
    container_name: etebase
    restart: always
    user: {{ users['etebase'] }}:{{ users['etebase'] }}
    environment:
      SERVER: http
      AUTO_UPDATE: 'true'
    ports:
      - 127.0.0.1:{{ ports['etebase'] }}:3735
    volumes:
      - {{ volumes['etebase_datadir'] }}:/data
      - ./etebase-server.ini:/data/etebase-server.ini
@@ -1,17 +0,0 @@
[global]
secret_file = /data/secret.txt
debug = false
static_root = /srv/etebase/static
static_url = /static/
media_root = /data/media
media_url = /user-media/
language_code = en-us
time_zone = {{ timezone }}


[allowed_hosts]
allowed_host1 = etebase.{{ domain }}

[database]
engine = django.db.backends.sqlite3
name = /data/db.sqlite3
@@ -1,9 +0,0 @@
- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,27 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & .env to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '640'
  loop:
    - docker-compose.yaml
    - .env

- name: "Create (if not exists) directory {{ volumes['fireshare_datadir'] }} & set permissions"
  file:
    path: "{{ volumes['fireshare_datadir'] }}"
    state: directory
    owner: "{{ users['fireshare'] + uid_shift }}"
    group: "{{ users['fireshare'] + uid_shift }}"
    mode: '750'
  become: true
@@ -1,18 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: fireshare_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    fireshare_pulled_images: "{{ fireshare_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ fireshare_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,9 +0,0 @@
ADMIN_USERNAME='{{ fireshare_secrets["admin_username"] }}'
ADMIN_PASSWORD='{{ fireshare_secrets["admin_password"] }}'
SECRET_KEY='{{ fireshare_secrets["secret_key"] }}'
MINUTES_BETWEEN_VIDEO_SCANS=5
# The location in the video thumbnails are generated. A value between 0-100 where 50 would be the frame in the middle of the video file and 0 would be the first frame of the video.
THUMBNAIL_VIDEO_LOCATION=0
DOMAIN=clips.{{ domain }}
PUID={{ users['fireshare'] }}
PGID={{ users['fireshare'] }}
@@ -1,12 +0,0 @@
services:
  fireshare:
    container_name: fireshare
    image: docker.io/shaneisrael/fireshare:latest
    restart: always
    env_file: .env
    ports:
      - 127.0.0.1:{{ ports['fireshare'] }}:80
    volumes:
      - {{ volumes['fireshare_datadir'] }}:/data
      - {{ volumes['fireshare_processeddir'] }}:/processed
      - {{ volumes['fireshare_videosdir'] }}:/videos
@@ -1,24 +0,0 @@
- name:
  become: true
  block:
    - name: Install package cifs-utils
      apt:
        name: cifs-utils

    - name: "Template {{ fstab_cifs_credentials_filename }} to /etc/{{ fstab_cifs_credentials_filename }}"
      template:
        src: "{{ fstab_cifs_credentials_filename }}"
        dest: "/etc/{{ fstab_cifs_credentials_filename }}"
        owner: root
        group: root
        mode: '600'
      register: fstab_template_cifs_credentials_result

    - name: Mount/Remount CIFS devices & edit fstab accordingly
      mount:
        state: mounted
        src: "{{ item.value.src }}"
        path: "{{ item.value.path }}"
        fstype: smb3
        opts: "uid={{ item.value.uid }},gid={{ item.value.gid }},file_mode=0{{ item.value.file_mode }},dir_mode=0{{ item.value.dir_mode }},credentials=/etc/{{ fstab_cifs_credentials_filename }},iocharset=utf8,rw,mfsymlinks,vers=3,seal"
      loop: "{{ cifs_mounts | dict2items }}"
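For the 'backups' entry in env.yml (uid 0, gid 1000, file_mode 640, dir_mode 750, credentials filename from the role's defaults below), the opts template above expands to roughly:

    uid=0,gid=1000,file_mode=0640,dir_mode=0750,credentials=/etc/storagebox-cifs-credentials.txt,iocharset=utf8,rw,mfsymlinks,vers=3,seal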
@@ -1,2 +0,0 @@
username={{ cifs_credentials['username'] }}
password={{ cifs_credentials['password'] }}
@@ -1 +0,0 @@
fstab_cifs_credentials_filename: storagebox-cifs-credentials.txt
@@ -1,25 +0,0 @@
- name: "Backup PostgreSQL hedgedoc database & {{ volumes['hedgedoc_uploadsdir'] }} directory"
  shell: >
    docker exec postgres
    pg_dump -c {{ role_name }} |
    borg create
    --compression lzma
    "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
    "{{ volumes['hedgedoc_uploadsdir'] }}"
    -
    --stdin-name dump_{{ role_name }}.sql
  environment:
    DOCKER_HOST: "{{ docker_host }}"
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true

- name: Prune borg repository
  command:
    cmd: |
      borg prune
      --glob-archives='{{ role_name }}-*'
      {{ borg_prune_options }}
      {{ borg_repodir }}
  environment:
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true
@@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,27 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & .env to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '600'
  loop:
    - docker-compose.yaml
    - .env

- name: "Create (if not exists) directory {{ volumes['hedgedoc_uploadsdir'] }} & set permissions"
  file:
    path: "{{ volumes['hedgedoc_uploadsdir'] }}"
    state: directory
    owner: "{{ users['hedgedoc'] + uid_shift }}"
    group: "{{ users['hedgedoc'] + uid_shift }}"
    mode: '700'
  become: true
@@ -1,24 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: hedgedoc_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    hedgedoc_pulled_images: "{{ hedgedoc_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ hedgedoc_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and hedgedoc_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,20 +0,0 @@
CMD_DB_DIALECT=postgres
CMD_DB_HOST='postgres.{{ domain }}'
CMD_DB_DATABASE=hedgedoc
CMD_DB_USERNAME='{{ hedgedoc_secrets["postgres_user"] }}'
CMD_DB_PASSWORD='{{ hedgedoc_secrets["postgres_password"] }}'
CMD_DOMAIN='hedgedoc.{{ domain }}'
CMD_PROTOCOL_USESSL=true
CMD_SESSION_SECRET='{{ hedgedoc_secrets["session_secret"] }}'
CMD_EMAIL=false

CMD_OAUTH2_PROVIDERNAME=Keycloak
CMD_OAUTH2_CLIENT_ID='{{ hedgedoc_secrets["client_id"] }}'
CMD_OAUTH2_CLIENT_SECRET='{{ hedgedoc_secrets["client_secret"] }}'
CMD_OAUTH2_AUTHORIZATION_URL=https://kc.{{ domain }}/realms/master/protocol/openid-connect/auth
CMD_OAUTH2_TOKEN_URL=https://kc.{{ domain }}/realms/master/protocol/openid-connect/token
CMD_OAUTH2_USER_PROFILE_URL=https://kc.{{ domain }}/realms/master/protocol/openid-connect/userinfo
CMD_OAUTH2_SCOPE=openid email profile
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
@@ -1,11 +0,0 @@
services:
  hedgedoc:
    container_name: hedgedoc
    image: quay.io/hedgedoc/hedgedoc:1.10.0-alpine
    restart: always
    user: {{ users['hedgedoc'] }}:{{ users['hedgedoc'] }}
    env_file: .env
    ports:
      - 127.0.0.1:{{ ports['hedgedoc'] }}:3000
    volumes:
      - {{ volumes['hedgedoc_uploadsdir'] }}:/hedgedoc/public/uploads
@@ -1,9 +0,0 @@
- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,18 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & services.toml to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '644'
  loop:
    - docker-compose.yaml
    - services.toml
@@ -1,18 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: homepage_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    homepage_pulled_images: "{{ homepage_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ homepage_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,10 +0,0 @@
services:
  homepage:
    container_name: homepage
    image: git.ahur.ac/viyurz/homepage:latest
    restart: always
    user: {{ users['homepage'] }}:{{ users['homepage'] }}
    ports:
      - 127.0.0.1:{{ ports['homepage'] }}:8686
    volumes:
      - ./services.toml:/etc/homepage/services.toml:ro
@@ -1,56 +0,0 @@
[[services]]
name = "Element"
description = "Web client of Element, an instant messaging client implementing the Matrix protocol."
domain = "element.viyurz.fr"
language = "TypeScript"
repository_url = "https://github.com/element-hq/element-web"

[[services]]
name = "EteBase"
description = "Server for EteSync, an end-to-end encrypted contacts, calendars, tasks and notes provider."
domain = "etebase.viyurz.fr"
language = "Python"
repository_url = "https://github.com/etesync/server"

[[services]]
name = "HedgeDoc"
description = "A real-time collaborative markdown editor."
domain = "hedgedoc.viyurz.fr"
language = "TypeScript"
repository_url = "https://github.com/hedgedoc/hedgedoc"

[[services]]
name = "Matrix"
description = "Synapse homeserver implementing the Matrix protocol, an open standard for real-time communication supporting encryption and VoIP."
domain = "matrix.viyurz.fr"
language = "Python"
repository_url = "https://github.com/element-hq/synapse"

[[services]]
name = "SearXNG"
description = "A privacy-respecting, hackable metasearch engine."
domain = "searx.viyurz.fr"
language = "Python"
repository_url = "https://github.com/searxng/searxng"

[[services]]
name = "Stalwart Mail Server"
description = "Secure & Modern All-in-One Mail Server (IMAP, JMAP, SMTP)."
domain = "mail.viyurz.fr"
language = "Rust"
repository_url = "https://github.com/stalwartlabs/mail-server"

[[services]]
name = "Stump"
description = "A comics, manga and digital book server with OPDS support."
domain = "stump.viyurz.fr"
language = "Rust / TypeScript"
repository_url = "https://github.com/stumpapp/stump"

[[services]]
name = "Vaultwarden"
description = "Rust rewrite of the Bitwarden server, a password management service."
domain = "vw.viyurz.fr"
language = "Rust"
repository_url = "https://github.com/dani-garcia/vaultwarden"
@@ -1,8 +0,0 @@
- name: Include vars from env.yml file
  include_vars:
    file: "{{ playbook_dir }}/../env.yml"

- name: Include secrets from secrets.yml file
  include_vars:
    file: "{{ playbook_dir }}/../secrets.yml"
  when: include_secrets | default(true) | bool
@@ -1,24 +0,0 @@
- name: "Backup PostgreSQL keycloak database"
  shell: >
    docker exec postgres
    pg_dump -c {{ role_name }} |
    borg create
    --compression lzma
    "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
    -
    --stdin-name dump_{{ role_name }}.sql
  environment:
    DOCKER_HOST: "{{ docker_host }}"
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true

- name: Prune borg repository
  command:
    cmd: |
      borg prune
      --glob-archives='{{ role_name }}-*'
      {{ borg_prune_options }}
      {{ borg_repodir }}
  environment:
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true
@@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@@ -1,19 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template Dockerfile, docker-compose.yaml & .env to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '640'
  loop:
    - Dockerfile
    - docker-compose.yaml
    - .env
@@ -1,25 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    build: true
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: keycloak_docker_compose_pull_result

- name: Display pulled image(s) name
  set_fact:
    keycloak_pulled_images: "{{ keycloak_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ keycloak_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and keycloak_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@@ -1,12 +0,0 @@
QUARKUS_TRANSACTION_MANAGER_ENABLE_RECOVERY=true

#KEYCLOAK_ADMIN=
#KEYCLOAK_ADMIN_PASSWORD=

KC_DB_URL_HOST=postgres.{{ domain }}
KC_DB_URL_DATABASE=keycloak
KC_DB_USERNAME={{ keycloak_secrets['postgres_user'] }}
KC_DB_PASSWORD='{{ keycloak_secrets["postgres_password"] }}'

KC_PROXY_HEADERS=xforwarded
KC_HOSTNAME=https://kc.{{ domain }}
@@ -1,15 +0,0 @@
FROM quay.io/keycloak/keycloak:25.0 as builder

ENV KC_DB=postgres

WORKDIR /opt/keycloak

RUN keytool -genkeypair -storepass password -storetype PKCS12 -keyalg RSA -keysize 2048 -dname "CN=server" -alias server -ext "SAN:c=IP:127.0.0.1" -keystore conf/server.keystore
RUN /opt/keycloak/bin/kc.sh build


FROM quay.io/keycloak/keycloak:25.0
COPY --from=builder /opt/keycloak/ /opt/keycloak/

ENTRYPOINT ["/opt/keycloak/bin/kc.sh"]
CMD ["start", "--optimized"]
@@ -1,9 +0,0 @@
services:
  keycloak:
    container_name: keycloak
    build: .
    restart: always
    user: {{ users['keycloak'] }}:{{ users['keycloak'] }}
    env_file: .env
    ports:
      - 127.0.0.1:{{ ports['keycloak'] }}:8443
@@ -1,25 +0,0 @@
- name: "Backup PostgreSQL stalwart database & {{ volumes['mailserver_datadir'] }}/etc/config.toml"
  shell: >
    docker exec postgres
    pg_dump -c {{ role_name }} |
    borg create
    --compression lzma
    "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
    "{{ volumes['mailserver_datadir'] }}/etc/config.toml"
    -
    --stdin-name dump_{{ role_name }}.sql
  environment:
    DOCKER_HOST: "{{ docker_host }}"
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true

- name: Prune borg repository
  command:
    cmd: |
      borg prune
      --glob-archives='{{ role_name }}-*'
      {{ borg_prune_options }}
      {{ borg_repodir }}
  environment:
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true
@@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@ -1,60 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '660'
  loop:
    - docker-compose.yaml
  become: true

- name: "Create (if not exists) directory {{ volumes['mailserver_datadir'] }} & set permissions"
  file:
    path: "{{ volumes['mailserver_datadir'] }}"
    state: directory
    owner: "{{ users['mailserver'] + uid_shift }}"
    group: "{{ users['mailserver'] + uid_shift }}"
    mode: '700'
  become: true

- name: Set limited permissions on certificate directories
  file:
    path: "/etc/{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '751'
  become: true
  loop:
    - letsencrypt
    - letsencrypt/live
    - letsencrypt/archive

- name: Set limited permissions on mail certificate directories
  file:
    path: "/etc/letsencrypt/{{ item }}/mail.{{ domain }}"
    state: directory
    owner: root
    group: "{{ host_uid }}"
    mode: '550'
  become: true
  loop:
    - live
    - archive

- name: Set limited permissions on certificate key file
  file:
    path: "/etc/letsencrypt/live/mail.{{ domain }}/privkey.pem"
    owner: root
    group: "{{ users['mailserver'] + uid_shift }}"
    mode: '640'
  become: true
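The last three tasks relax the ownership chain just enough for the unprivileged container user to read the mail key. A quick hedged way to verify the whole chain after a run (example.org stands in for the rendered {{ domain }}):

# Show owner/mode of every path component down to the key
namei -l /etc/letsencrypt/live/mail.example.org/privkey.pem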
@ -1,24 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: mailserver_docker_compose_pull_result

- name: Collect pulled image name(s)
  set_fact:
    mailserver_pulled_images: "{{ mailserver_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ mailserver_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and mailserver_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@ -1,15 +0,0 @@
services:
  mailserver:
    container_name: mailserver
    image: docker.io/stalwartlabs/mail-server:v0.10.2
    restart: always
    user: "{{ users['mailserver'] }}:{{ users['mailserver'] }}"
    ports:
      - "{{ ports['mailserver_smtp'] }}:25"
      - "{{ ports['mailserver_smtps'] }}:465"
      - "{{ ports['mailserver_imaps'] }}:993"
      - "{{ ports['mailserver_https'] }}:443"
    volumes:
      - {{ volumes['mailserver_tls_certificate_file'] }}:/etc/fullchain.pem:ro
      - {{ volumes['mailserver_tls_certificate_key_file'] }}:/etc/privkey.pem:ro
      - {{ volumes['mailserver_datadir'] }}:/opt/stalwart-mail
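Stalwart terminates TLS itself with the mounted certificates, so a direct handshake against the published IMAPS port is a reasonable smoke test; a hedged sketch (example.org stands in for the rendered mail.{{ domain }}):

# Check the IMAPS listener and the certificate it presents
openssl s_client -connect mail.example.org:993 -brief </dev/null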
@ -1,24 +0,0 @@
- name: "Backup PostgreSQL vaultwarden database"
  shell: >
    docker exec postgres
    pg_dump -c {{ role_name }} |
    borg create
    --compression lzma
    "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
    -
    --stdin-name dump_{{ role_name }}.sql
  environment:
    DOCKER_HOST: "{{ docker_host }}"
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true

- name: Prune borg repository
  command:
    cmd: |
      borg prune
      --glob-archives='{{ role_name }}-*'
      {{ borg_prune_options }}
      {{ borg_repodir }}
  environment:
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true
@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool
@ -1,28 +0,0 @@
- name: "(Re)Create {{ project_dir }} project directory"
  file:
    path: "{{ project_dir }}"
    state: "{{ item }}"
  loop:
    - absent
    - directory

- name: Template docker-compose.yaml & .env to project directory
  template:
    src: "{{ item }}"
    dest: "{{ project_dir }}/{{ item }}"
    owner: "{{ host_uid }}"
    group: "{{ host_uid }}"
    mode: '640'
  loop:
    - docker-compose.yaml
    - .env

- name: "Create (if not exists) directory {{ volumes['vaultwarden_datadir'] }} & set permissions"
  file:
    path: "{{ volumes['vaultwarden_datadir'] }}"
    state: directory
    recurse: true
    owner: "{{ users['vaultwarden'] + uid_shift }}"
    group: "{{ users['vaultwarden'] + uid_shift }}"
    mode: '770'
  become: true
@ -1,24 +0,0 @@
- name: Pull project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    recreate: never
    pull: always
    debug: true
  when: docker_pull_images | bool
  register: vaultwarden_docker_compose_pull_result

- name: Collect pulled image name(s)
  set_fact:
    vaultwarden_pulled_images: "{{ vaultwarden_pulled_images | default([]) + [item.pulled_image.name] }}"
  loop: "{{ vaultwarden_docker_compose_pull_result['actions'] | default([]) | selectattr('pulled_image', 'defined') }}"

- name: Include backup tasks
  include_tasks:
    file: backup.yml
  # Make a backup if we didn't already make one and we pulled a new image
  when: not run_backup | default(false) and vaultwarden_pulled_images is defined

- name: Create/Restart project services
  community.docker.docker_compose_v2:
    project_src: "{{ project_dir }}"
    state: "{{ run_setup | default(false) | bool | ternary('restarted', 'present') }}"
@ -1,12 +0,0 @@
ADMIN_TOKEN='{{ vaultwarden_secrets["admin_token_hash"] }}'
DOMAIN=https://vw.{{ domain }}
SIGNUPS_ALLOWED=false

DATABASE_URL=postgresql://{{ vaultwarden_secrets['postgres_user'] }}:{{ vaultwarden_secrets['postgres_password'] }}@postgres.{{ domain }}:{{ ports['postgres'] }}/vaultwarden

SMTP_HOST=mail.{{ domain }}
SMTP_FROM=vaultwarden@{{ domain }}
SMTP_PORT={{ ports['mailserver_smtps'] }}
SMTP_SECURITY=force_tls
SMTP_USERNAME='{{ vaultwarden_secrets["smtp_username"] }}'
SMTP_PASSWORD='{{ vaultwarden_secrets["smtp_password"] }}'
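ADMIN_TOKEN is stored as an Argon2 hash rather than a plaintext secret. One common way to generate such a hash is the reference argon2 CLI; a hedged sketch (the password and cost parameters are placeholders, not values from this repo):

# Produce a PHC-format Argon2id hash suitable for ADMIN_TOKEN
echo -n 'MySecretAdminPassword' | argon2 "$(openssl rand -base64 32)" -e -id -k 65540 -t 3 -p 4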
@ -1,32 +0,0 @@
services:
  minecraft:
    container_name: minecraft
    image: docker.io/itzg/minecraft-server:latest
    restart: always
    deploy:
      resources:
        limits:
          cpus: '0.8'
    environment:
      UID: 1011
      GID: 1011
      VERSION: 1.21.1
      EULA: "TRUE"
      MEMORY: 1.25G
      ENABLE_COMMAND_BLOCK: "true"
      MOTD: "Fjeaj"
      OPS: |
        Viyurz
      TYPE: FABRIC
      MODS: |
        https://cdn.modrinth.com/data/gvQqBUqZ/versions/5szYtenV/lithium-fabric-mc1.21.1-0.13.0.jar
        https://cdn.modrinth.com/data/uXXizFIs/versions/wmIZ4wP4/ferritecore-7.0.0-fabric.jar
    ports:
      - "3690:25565"
      - "25565:25565"
    volumes:
      - minecraft:/data

volumes:
  minecraft:
    name: minecraft
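The itzg image bundles an rcon-cli wrapper, which is the usual way to reach the server console from the host; a short hedged example:

# Send a console command to the running server over RCON
docker exec minecraft rcon-cli say 'Server restarting soon'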
@ -1,22 +0,0 @@
- name: Install and configure nftables
  become: true
  block:
    - name: Install package nftables
      apt:
        name: nftables

    - name: Template nftables.conf to /etc/nftables.conf
      template:
        src: nftables.conf
        dest: /etc/nftables.conf
        owner: root
        group: root
        mode: '755'
      register: nftables_template_conf_result

    - name: Restart nftables service
      service:
        name: nftables
        state: restarted
        enabled: true
      when: nftables_template_conf_result['changed']
@ -1,88 +0,0 @@
#!/usr/sbin/nft -f

flush ruleset

table inet nat {
    chain prerouting {
        type nat hook prerouting priority dstnat;
        iif eth0 tcp dport {{ ports['syncthing_relaysrv'] }} redirect to :22067
        iif eth0 tcp dport 25 redirect to :{{ ports['mailserver_smtp'] }}
        iif eth0 tcp dport 465 redirect to :{{ ports['mailserver_smtps'] }}
        iif eth0 tcp dport 993 redirect to :{{ ports['mailserver_imaps'] }}
    }
}

table inet filter {
    set blackhole_ipv4 {
        type ipv4_addr
        timeout 30s
        flags dynamic
    }

    set blackhole_ipv6 {
        type ipv6_addr
        timeout 30s
        flags dynamic
    }

    chain input {
        type filter hook input priority 0; policy drop;

        iif lo accept

        # Block all IPs in blackhole
        ip saddr @blackhole_ipv4 set update ip saddr @blackhole_ipv4 drop
        ip6 saddr @blackhole_ipv6 set update ip6 saddr @blackhole_ipv6 drop

        ct state invalid drop
        ct state { established, related } accept

        # Prevent DDoS
        # Rate limiting
        meta nfproto ipv4 meter ratelimit4 \
            { ip saddr limit rate over 75/second burst 15 packets } \
            add @blackhole_ipv4 { ip saddr } counter
        meta nfproto ipv6 meter ratelimit6 \
            { ip6 saddr limit rate over 75/second burst 15 packets } \
            add @blackhole_ipv6 { ip6 saddr } counter
        # Max concurrent connections
        meta nfproto ipv4 meter connlimit4 \
            { ip saddr ct count over 100 } add @blackhole_ipv4 { ip saddr } counter
        meta nfproto ipv6 meter connlimit6 \
            { ip6 saddr ct count over 100 } add @blackhole_ipv6 { ip6 saddr } counter

        # Allow ICMP
        meta l4proto icmp accept
        meta l4proto ipv6-icmp accept

        # HTTP/S
        tcp dport { http, https } accept

        # SSH
        tcp dport ssh accept

        # SMTP/IMAP
        tcp dport { {{ ports['mailserver_smtp'] }}, {{ ports['mailserver_smtps'] }}, {{ ports['mailserver_imaps'] }} } accept

        # Syncthing
        tcp dport { {{ ports['syncthing_tcp'] }}, 22067 } accept
        udp dport {{ ports['syncthing_udp'] }} accept

        # Coturn
        tcp dport { {{ ports['coturn_listening'] }}, {{ ports['coturn_tls_listening'] }} } accept
        udp dport { {{ ports['coturn_listening'] }}, {{ ports['coturn_tls_listening'] }}, {{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }} } accept

    }

    chain forward {
        type filter hook forward priority 0; policy accept;
    }

    chain output {
        type filter hook output priority 0; policy accept;

        # Don't waste resources responding to blocked IPs
        ip daddr @blackhole_ipv4 reject
        ip6 daddr @blackhole_ipv6 reject
    }
}
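A syntax error in this template would leave the host without its ruleset on the next restart, so it is worth a dry run before the service task fires; the stock nft CLI supports this:

# Parse-check the rendered ruleset without applying it
nft -c -f /etc/nftables.conf
# Apply it and inspect the live ruleset
nft -f /etc/nftables.conf
nft list ruleset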
@ -1,60 +0,0 @@
- name: Install and configure NGINX
  become: true
  block:
    - name: Install package nginx
      apt:
        name: nginx

    - name: Delete directories in /etc/nginx/
      file:
        path: "/etc/nginx/{{ item }}"
        state: absent
      loop:
        - sites-enabled
        - snippets

    - name: Create directories in /etc/nginx/
      file:
        path: "/etc/nginx/{{ item }}"
        state: directory
      loop:
        - sites-enabled
        - snippets

    - name: Template configuration files to /etc/nginx/
      template:
        src: "{{ item.src }}"
        dest: "/etc/nginx/{{ item.path }}"
        owner: root
        group: root
        mode: '644'
      with_filetree: ../templates/
      when: item.state == 'file'

    - name: Get state of file /etc/nginx/dhparam.txt
      stat:
        path: /etc/nginx/dhparam.txt
      register: nginx_stat_dhparam_result

    - name: Download dhparam file from Mozilla
      get_url:
        url: https://ssl-config.mozilla.org/ffdhe2048.txt
        dest: /etc/nginx/dhparam.txt
      when: not nginx_stat_dhparam_result.stat.exists

    - name: Set correct permissions on certificate directories
      file:
        path: "/etc/letsencrypt/{{ item }}/{{ domain }}"
        state: directory
        owner: root
        group: root
        mode: '750'
      loop:
        - live
        - archive

    - name: Start/Reload NGINX service
      service:
        name: nginx
        state: reloaded
        enabled: yes
@ -1,38 +0,0 @@
user www-data;
worker_processes auto;
worker_rlimit_nofile 1024;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 512;
    multi_accept off;
}

http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;

    gzip off;
    server_tokens off;
    keepalive_timeout 30;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    include /etc/nginx/mime.types;

    # Needed to support websocket connections
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' "";
    }

    include /etc/nginx/conf.d/*.conf;

    include /etc/nginx/snippets/proxy.conf;
    include /etc/nginx/snippets/ssl.conf;
    include /etc/nginx/snippets/ssl-headers.conf;

    include /etc/nginx/sites-enabled/*;
}
@ -1,12 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name clips.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['fireshare'] }};

        client_max_body_size 500M;
    }
}
@ -1,15 +0,0 @@
# Redirect HTTP to HTTPS
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    return 308 https://$host$request_uri;
}

# Default HTTPS server
server {
    listen 443 ssl http2 default_server;
    listen [::]:443 ssl http2 default_server;

    return 404;
}
@ -1,9 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name dl.{{ domain }};

    root /var/www/html;
    autoindex on;
}
@ -1,10 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name etebase.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['etebase'] }};
    }
}
@ -1,17 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name hedgedoc.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['hedgedoc'] }};
    }

    location /socket.io/ {
        proxy_pass http://127.0.0.1:{{ ports['hedgedoc'] }};

        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,25 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name {{ domain }};

    location = /.well-known/matrix/server {
        default_type application/json;

        return 200 '{ "m.server": "matrix.{{ domain }}:443" }';
    }

    location = /.well-known/matrix/client {
        default_type application/json;

        include /etc/nginx/snippets/ssl-headers.conf;
        add_header Access-Control-Allow-Origin '*';

        return 200 '{ "m.homeserver": { "base_url": "https://matrix.{{ domain }}" } }';
    }

    location / {
        proxy_pass http://127.0.0.1:{{ ports['homepage'] }};
    }
}
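The two well-known stanzas delegate Matrix federation and client discovery to matrix.{{ domain }}. A quick hedged verification once deployed (example.org stands in for the rendered domain):

# Federation delegation
curl -s https://example.org/.well-known/matrix/server
# Client homeserver discovery
curl -s https://example.org/.well-known/matrix/client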
@ -1,13 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name kc.{{ domain }};

    location / {
        proxy_pass https://127.0.0.1:{{ ports['keycloak'] }};

        #include /etc/nginx/snippets/websocket.conf;
        #include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,43 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name mail.{{ domain }};

    location / {
        proxy_pass https://127.0.0.1:{{ ports['mailserver_https'] }};

        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name autoconfig.{{ domain }};

    location / {
        return 404;
    }

    location = /mail/config-v1.1.xml {
        proxy_pass https://127.0.0.1:{{ ports['mailserver_https'] }};
    }
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name mta-sts.{{ domain }};

    location / {
        return 404;
    }

    location = /.well-known/mta-sts.txt {
        proxy_pass https://127.0.0.1:{{ ports['mailserver_https'] }};
    }
}
@ -1,13 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name searx.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['searxng'] }};

        include /etc/nginx/snippets/ssl-headers.conf;
        add_header Content-Security-Policy "upgrade-insecure-requests; default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; form-action 'self' https://github.com/searxng/searxng/issues/new; font-src 'self'; frame-ancestors 'self'; base-uri 'self'; connect-src 'self' https://overpass-api.de; img-src 'self' data: https://*.tile.openstreetmap.org; frame-src https://www.youtube-nocookie.com https://player.vimeo.com https://www.dailymotion.com https://www.deezer.com https://www.mixcloud.com https://w.soundcloud.com https://embed.spotify.com";
    }
}
@ -1,12 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name sex.{{ domain }};

    root /var/www/sex;

    location / {
        random_index on;
    }
}
@ -1,13 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name stump.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['stump'] }};

        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,12 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name matrix.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['synapse'] }};

        client_max_body_size {{ synapse['max_upload_size'] }};
    }
}
@ -1,17 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name stdisco.{{ domain }};

    ssl_verify_client optional_no_ca;

    location / {
        proxy_pass http://127.0.0.1:{{ ports['syncthing_discosrv'] }};

        proxy_set_header X-Client-Port $remote_port;
        proxy_set_header X-SSL-Cert $ssl_client_cert;
        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,13 +0,0 @@
server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name status.{{ domain }};

    location / {
        proxy_pass http://127.0.0.1:{{ ports['uptime_kuma'] }};

        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,19 +0,0 @@
upstream vaultwarden {
    zone vaultwarden 64k;
    server 127.0.0.1:{{ ports['vaultwarden'] }};
    keepalive 2;
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;

    server_name vw.{{ domain }};

    location / {
        proxy_pass http://vaultwarden;

        include /etc/nginx/snippets/websocket.conf;
        include /etc/nginx/snippets/proxy.conf;
    }
}
@ -1,10 +0,0 @@
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-URI $request_uri;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
proxy_set_header X-Real-IP $remote_addr;
@ -1,3 +0,0 @@
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
# add_header X-Robots-Tag "noindex, nofollow" always;
add_header Set-Cookie "Path=/; HttpOnly; Secure";
@ -1,18 +0,0 @@
ssl_certificate /etc/letsencrypt/live/{{ domain }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ domain }}/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/{{ domain }}/chain.pem;

ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;

# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl_dhparam /etc/nginx/dhparam.txt;

ssl_prefer_server_ciphers on;

ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m;
ssl_session_tickets off;

ssl_stapling on;
ssl_stapling_verify on;
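Every vhost inherits this snippet, so a config test before reload is the safe sequence after edits; a hedged check of the result (example.org assumed):

# Syntax-check, then reload nginx
nginx -t && systemctl reload nginx
# Confirm a TLS 1.2 handshake still succeeds with the pinned ciphers
openssl s_client -connect example.org:443 -tls1_2 -brief </dev/null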
@ -1,2 +0,0 @@
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
@ -1,24 +0,0 @@
- name: Create borg backup from PostgreSQL dumpall
  shell: >
    docker exec postgres
    pg_dumpall |
    borg create
    --compression lzma
    "{{ borg_repodir }}::{{ role_name }}-{now:%Y-%m-%d_%H-%M-%S}"
    -
    --stdin-name dumpall.sql
  environment:
    DOCKER_HOST: "{{ docker_host }}"
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true

- name: Prune borg repository
  command:
    cmd: |
      borg prune
      --glob-archives='{{ role_name }}-*'
      {{ borg_prune_options }}
      {{ borg_repodir }}
  environment:
    BORG_PASSCOMMAND: "cat {{ borg_passphrase_file }}"
  become: true
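A pg_dumpall backup restores through psql rather than pg_restore. A hedged restore sketch, assuming the same repo variables and a postgres container like the one this task targets:

# Stream the stored cluster dump back into PostgreSQL (paths assumed)
export BORG_REPO=/path/to/borg/repo
export BORG_PASSCOMMAND="cat /path/to/borg_passphrase_file"
borg extract --stdout ::postgres-2024-01-01_00-00-00 dumpall.sql | docker exec -i postgres psql -U postgres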
@ -1,14 +0,0 @@
- name: Include backup tasks
  include_tasks:
    file: backup.yml
  when: run_backup | default(false) | bool

- name: Include setup tasks
  include_tasks:
    file: setup.yml
  when: run_setup | default(false) | bool

- name: Include update tasks
  include_tasks:
    file: update.yml
  when: run_update | default(false) | bool