The Great Ansible Update.

Viyurz 2024-02-17 19:01:04 +01:00
parent f264c34304
commit 474ca92782
82 changed files with 945 additions and 427 deletions

5
.gitignore vendored
View file

@ -1,4 +1 @@
-coturn/secrets.conf
-.env
-synapse/matrix_access_token.txt
-synapse/secrets.yaml
+secrets.yml

20
README.md Normal file
View file

@ -0,0 +1,20 @@
# vps
This repository contains all the files I use to manage services hosted on [viyurz.fr](https://viyurz.fr).
## Requirements
Install Ansible:
```
sudo apt install -y ansible
```
Set up SSL certificates with Certbot beforehand. Install Certbot and its OVH DNS plugin:
```
sudo apt install -y certbot python3-certbot-dns-ovh
```
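The roles reference certificates under `/etc/letsencrypt/live/viyurz.fr/` and `/etc/letsencrypt/live/turn.viyurz.fr/`, so they must be issued before running the playbooks. A minimal sketch using the DNS challenge — the credentials file path and the wildcard domain are assumptions, adjust them to your DNS zone:
```
# Issue certificates via the OVH DNS challenge (paths and domains are examples).
sudo certbot certonly --dns-ovh --dns-ovh-credentials /root/.ovhapi.ini \
  -d viyurz.fr -d '*.viyurz.fr'
sudo certbot certonly --dns-ovh --dns-ovh-credentials /root/.ovhapi.ini \
  -d turn.viyurz.fr
```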
## Secrets
Copy the existing `secrets.yml.example` to `secrets.yml`, run `ansible-vault encrypt secrets.yml` to encrypt the file with a password, and finally edit the newly encrypted file with `ansible-vault edit secrets.yml`.
If you want to change the vault password, run `ansible-vault rekey secrets.yml`.
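For reference, the full sequence looks like this:
```
cp secrets.yml.example secrets.yml
ansible-vault encrypt secrets.yml   # choose a vault password
ansible-vault edit secrets.yml      # fill in the secrets
ansible-vault rekey secrets.yml     # only if you change the password later
```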

27
ansible-playbook-selector.sh Executable file
View file

@ -0,0 +1,27 @@
#!/bin/bash
script_relative_path="$(dirname "$0")"
declare -i playbook_number=1
mapfile -t playbook_list < <(find "$script_relative_path/playbooks" -type f | grep -oP '[^/]+\.yml$')
echo "Playbook list:"
for playbook in "${playbook_list[@]}"; do
echo " [$playbook_number] $playbook"
playbook_number+=1
done
read -rp "Select playbook number to execute: " selected_playbook_number
# Validate the input before using it as an array index.
if ! echo "$selected_playbook_number" | grep -qP '^[1-9][0-9]*$' \
|| (( selected_playbook_number > ${#playbook_list[@]} )); then
echo "Invalid playbook number entered."
exit 1
fi
selected_playbook_name="${playbook_list[((selected_playbook_number - 1))]}"
echo "Selected playbook: $selected_playbook_name"
ansible-playbook "$script_relative_path/playbooks/$selected_playbook_name"

3
ansible.cfg Normal file
View file

@ -0,0 +1,3 @@
[defaults]
roles_path = ./roles
ask_vault_pass = True

View file

@ -1,17 +0,0 @@
#!/bin/bash
# If the command starts with an option, prepend the `turnserver` binary to it.
if [ "${1:0:1}" == '-' ]; then
set -- turnserver "$@"
fi
# Evaluate each argument separately to avoid mixing them up in a single `eval`.
expanded=()
for i in "$@"; do
expanded+=("$(eval "echo $i")")
done
cp /etc/coturn/server.conf /tmp/turnserver.conf
cat /etc/coturn/secrets.conf >> /tmp/turnserver.conf
exec "${expanded[@]}"

View file

@ -1,25 +0,0 @@
services:
coturn:
container_name: coturn
image: coturn/coturn:alpine
restart: always
user: '666:666'
command:
- "--log-file=stdout"
- "-c"
- "/tmp/turnserver.conf"
ports:
- 3478:3478
- 3478:3478/udp
- 5349:5349
- 5349:5349/udp
- 49152-49172:49152-49172/udp
tmpfs:
- /var/lib/coturn
volumes:
- ./turnserver.conf:/etc/coturn/server.conf
- ./coturn-docker-entrypoint.sh:/usr/local/bin/docker-entrypoint.sh
- ./secrets.conf:/etc/coturn/secrets.conf
- /etc/letsencrypt/live/turn.viyurz.fr/fullchain.pem:/etc/coturn/cert.pem
# chown root:666 & chmod 640 on privkey file
- /etc/letsencrypt/live/turn.viyurz.fr/privkey.pem:/etc/coturn/pkey.pem

View file

@ -1 +0,0 @@
static-auth-secret=XXX

View file

@ -1,3 +0,0 @@
{
"default_server_name": "viyurz.fr"
}

105
env.yml Normal file
View file

@ -0,0 +1,105 @@
domain: viyurz.fr
timezone: "Europe/Paris"
host_uid: 1000
docker_projects_dir: "{{ ansible_env['HOME'] }}/docker-projects"
# UID shift for mapping between host & containers
uid_shift: 99999
cifs_host: "{{ cifs_credentials['username'] }}.your-storagebox.de"
cifs_mounts:
backups:
src: "//{{ cifs_host }}/backup/backups"
path: /mnt/storagebox/backups
uid: 0
gid: "{{ host_uid }}"
file_mode: 640
dir_mode: 750
storagebox:
src: "//{{ cifs_host }}/backup"
path: /mnt/storagebox
uid: 0
gid: 0
file_mode: 640
dir_mode: 751
syncthing:
src: "//{{ cifs_host }}/backup/syncthing"
path: /mnt/storagebox/syncthing
uid: "{{ users['syncthing'] + uid_shift }}"
gid: "{{ users['syncthing'] + uid_shift }}"
file_mode: 640
dir_mode: 750
projects:
- coturn
- element
- etebase
- hedgedoc
- homepage
- reverse-proxy
- searxng
- synapse
- syncthing
- vaultwarden
# Ports exposed to host
ports:
coturn_listening: 3478
coturn_tls_listening: 5349
coturn_relay_min: 49152
coturn_relay_max: 49172
element: 8084
etebase: 3735
hedgedoc: 8086
homepage: 8082
searxng: 8083
synapse: 8008
syncthing_discosrv: 8443
syncthing_webui: 8384
syncthing_tcp: 5432
syncthing_udp: 22000
vaultwarden: 8081
# UID in containers
users:
coturn: 666
etebase: 373
hedgedoc: 1004
hedgedoc_mysql: 1005
homepage: 101
searxng: 977
searxng_redis: 999
synapse: 991
synapse_postgres: 70
syncthing: 1001
syncthing_discosrv: 1002
vaultwarden: 1000
volumes:
coturn_tls_certificate_file: "/etc/letsencrypt/live/turn.{{ domain }}/fullchain.pem"
coturn_tls_certificate_key_file: "/etc/letsencrypt/live/turn.{{ domain }}/privkey.pem"
etebase_datadir: /mnt/etebasedata
hedgedoc_mysql_datadir: /mnt/hedgedoc/mysql-data
hedgedoc_configdir: /mnt/hedgedoc/config
synapse_datadir: /mnt/synapsedata
synapse_postgres_datadir: /mnt/synapsepgdata
syncthing_datadir: "{{ cifs_mounts['syncthing']['path'] }}"
vaultwarden_datadir: /mnt/vwdata
# Service-specific variables
reverse_proxy:
ssl_certificate_file: "/etc/letsencrypt/live/{{ domain }}/fullchain.pem"
ssl_certificate_key_file: "/etc/letsencrypt/live/{{ domain }}/privkey.pem"
ssl_trusted_certificate_file: "/etc/letsencrypt/live/{{ domain }}/chain.pem"
resolver: "185.12.64.12 [a01:4ff:ff00::add:2] [2a01:4ff:ff00::add:1]"
synapse:
max_upload_size: 50M

View file

@ -1,15 +0,0 @@
services:
etebase:
image: victorrds/etebase:alpine
container_name: etebase
restart: always
user: '373:373'
environment:
SERVER: http
ALLOWED_HOSTS: etebase.viyurz.fr
SUPER_USER: v444599a8zJUBud60fu9uk9Vo3xXHinp
AUTO_UPDATE: 'true'
ports:
- 3735:3735
volumes:
- /mnt/etebasedata:/data

View file

@ -1,19 +0,0 @@
services:
nextcloud:
image: nextcloud/all-in-one:latest
restart: always
container_name: nextcloud-aio-mastercontainer # This line is not allowed to be changed as otherwise AIO will not work correctly
environment: # Is needed when using any of the options below
- APACHE_PORT=11000 # Is needed when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
- APACHE_IP_BINDING=127.0.0.1 # Should be set when running behind a web server or reverse proxy (like Apache, Nginx, Cloudflare Tunnel and else) that is running on the same host. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
- NEXTCLOUD_DATADIR=/mnt/ncdata # Allows to set the host directory for Nextcloud's datadir. ⚠️⚠️⚠️ Warning: do not set or adjust this value after the initial Nextcloud installation is done! See https://github.com/nextcloud/all-in-one#how-to-change-the-default-location-of-nextclouds-datadir
- WATCHTOWER_DOCKER_SOCKET_PATH=$XDG_RUNTIME_DIR/docker.sock
ports:
- 8080:8080
volumes:
- nextcloud_aio_mastercontainer:/mnt/docker-aio-config # This line is not allowed to be changed as otherwise the built-in backup solution will not work
- $XDG_RUNTIME_DIR/docker.sock:/var/run/docker.sock:ro # May be changed on macOS, Windows or docker rootless. See the applicable documentation. If adjusting, don't forget to also set 'WATCHTOWER_DOCKER_SOCKET_PATH'!
volumes:
nextcloud_aio_mastercontainer:
name: nextcloud_aio_mastercontainer # This line is not allowed to be changed as otherwise the built-in backup solution will not work

View file

@ -1,8 +0,0 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----

View file

@ -1,39 +0,0 @@
#!/bin/bash
if [[ $UID -ne 0 ]]; then
echo "This script must be run as root."
exit 1
fi
# Relative path, for the cases where
# the script is not executed from
# the directory it is located in.
rel_path="$(dirname "$0")"
# Files required by the script
files=('dhparam.txt' 'nginx.conf' 'reverse-proxy.conf')
for file in "${files[@]}"; do
if ! [[ -f "$rel_path/$file" ]]; then
echo "Required file $file is missing, exiting."
exit 1
fi
done
if [[ ! -x /usr/sbin/nginx ]]; then
apt install -y nginx
fi
cp "$rel_path/nginx.conf" /etc/nginx/
cp "$rel_path/reverse-proxy.conf" /etc/nginx/sites-available/
cp "$rel_path/dhparam.txt" /etc/nginx/
rm /etc/nginx/sites-enabled/*
ln -s /etc/nginx/sites-available/reverse-proxy.conf /etc/nginx/sites-enabled/reverse-proxy.conf
systemctl start nginx
systemctl reload nginx

View file

@ -0,0 +1,10 @@
# Playbook to mount CIFS mounts defined in env.yml
- name: Include variables files
hosts: localhost
roles:
- include-vars
- name: Edit fstab configuration & mount CIFS devices
hosts: localhost
roles:
- fstab

View file

@ -0,0 +1,5 @@
- name: Setup Docker rootless
hosts: localhost
roles:
- include-vars
- dockerd

View file

@ -0,0 +1,28 @@
- name: Include variables files & load nftables.conf
hosts: localhost
roles:
- include-vars
- nftables
- name: Update project(s)
hosts: localhost
vars_prompt:
- name: selected_projects
prompt: "Choose projects to update (Keep empty to update all. Projects list: {{ hostvars['localhost']['projects'] }})"
private: false
unsafe: true
- name: docker_pull_images
prompt: "Pull project(s) images?"
default: false
private: false
tasks:
- name: Update project(s)
include_role:
name: "{{ project }}"
loop: "{{ (selected_projects | split) | default(projects, true) }}"
loop_control:
# Do not use default variable name 'item' to prevent collisions with loops in roles.
loop_var: project
when: project in projects

View file

@ -1,17 +0,0 @@
80/443 -> NGINX reverse proxy
995 -> SSH
3478 -> coturn
3735 -> Etebase
5349 -> coturn
5432 -> Syncthing
8008 -> Synapse
8080 -> Nextcloud AIO
8081 -> Vaultwarden
8082 -> nginx-www
8083 -> SearXNG
8084 -> element-web
8384 -> Syncthing Web UI
8443 -> stdisco
11000 -> Nextcloud
22000 -> Syncthing
49152-49652/udp -> coturn

View file

@ -0,0 +1,68 @@
- name: "Create {{ coturn_project_dir }} project directory"
file:
path: "{{ coturn_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ coturn_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Template turnserver.conf to project directory
template:
src: turnserver.conf
dest: "{{ coturn_project_dir }}/turnserver.conf"
owner: "{{ ansible_env['USER'] }}"
mode: '640'
# Store result to restart services if the file changed
register: coturn_template_turnserver_result
# Separate task because template module cannot chown/chgrp to a non-existing user/group
- name: "Change group of turnserver.conf to coturn GID ({{ users['coturn'] + uid_shift }})"
file:
path: "{{ coturn_project_dir }}/turnserver.conf"
group: "{{ users['coturn'] + uid_shift }}"
become: true
- name: Set limited permissions on certificate directories
file:
path: "/etc/{{ item }}"
state: directory
owner: root
group: root
mode: '751'
become: true
loop:
- letsencrypt
- letsencrypt/live
- letsencrypt/archive
- name: Set limited permissions on certificate directories
file:
path: "/etc/letsencrypt/{{ item }}/turn.{{ domain }}"
state: directory
owner: "{{ host_uid }}"
group: "{{ users['coturn'] + uid_shift }}"
mode: '550'
become: true
loop:
- live
- archive
- name: Set limited permissions on certificate key file
file:
path: "/etc/letsencrypt/live/turn.{{ domain }}/privkey.pem"
owner: root
group: "{{ users['coturn'] + uid_shift }}"
mode: '640'
become: true
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ coturn_project_dir }}"
pull: "{{ docker_pull_images | bool }}"
# Restart if config file(s) changed
restarted: "{{ coturn_template_turnserver_result['changed'] | bool }}"

View file

@ -0,0 +1,18 @@
services:
coturn:
container_name: coturn
image: coturn/coturn:alpine
restart: always
user: {{ users['coturn'] }}:{{ users['coturn'] }}
ports:
- {{ ports['coturn_listening'] }}:{{ ports['coturn_listening'] }}
- {{ ports['coturn_listening'] }}:{{ ports['coturn_listening'] }}/udp
- {{ ports['coturn_tls_listening'] }}:{{ ports['coturn_tls_listening'] }}
- {{ ports['coturn_tls_listening'] }}:{{ ports['coturn_tls_listening'] }}/udp
- {{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }}:{{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }}/udp
tmpfs:
- /var/lib/coturn
volumes:
- ./turnserver.conf:/etc/coturn/turnserver.conf
- {{ volumes['coturn_tls_certificate_file'] }}:/etc/coturn/cert.pem
- {{ volumes['coturn_tls_certificate_key_file'] }}:/etc/coturn/pkey.pem

View file

@ -1,19 +1,19 @@
-# Required behind NAT
-external-ip=167.235.49.84
-#external-ip=2a01:4f8:c0c:a25a::1
+listening-port={{ ports['coturn_listening'] }}
+tls-listening-port={{ ports['coturn_tls_listening'] }}
 # Lower and upper bounds of the UDP relay endpoints:
 # (default values are 49152 and 65535)
-min-port=49152
-max-port=49172
+min-port={{ ports['coturn_relay_min'] }}
+max-port={{ ports['coturn_relay_max'] }}
 #verbose
 fingerprint
 # Credentials in secrets.conf (static-auth-secret)
 use-auth-secret
+static-auth-secret={{ coturn_secrets['static_auth_secret'] }}
-realm=turn.viyurz.fr
+realm=turn.{{ domain }}
 # TLS certificates, including intermediate certs.
 # For Let's Encrypt certificates, use `fullchain.pem` here.

View file

@ -0,0 +1 @@
coturn_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,63 @@
- name: Make sure required packages are installed
apt:
name:
- docker.io
- docker-compose
- rootlesskit
- slirp4netns
- uidmap
become: true
- name: Make sure system-wide Docker daemon is stopped & disabled
service:
name: docker
state: stopped
enabled: false
become: true
- name: Make sure system-wide Docker socket is stopped & disabled
service:
name: docker.socket
state: stopped
enabled: false
become: true
- name: Run dockerd-rootless-setuptool.sh script
command:
cmd: /usr/share/docker.io/contrib/dockerd-rootless-setuptool.sh install
- name: Make sure /usr/share/docker.io/contrib is in PATH variable
lineinfile:
path: "{{ ansible_env['HOME'] }}/.profile"
regex: '^export PATH="/usr/share/docker\.io/contrib'
line: 'export PATH="/usr/share/docker.io/contrib:$PATH"'
- name: Make sure DOCKER_HOST variable is set correctly
lineinfile:
path: "{{ ansible_env['HOME'] }}/.profile"
regex: '^export DOCKER_HOST='
line: "export DOCKER_HOST=unix:///run/user/{{ host_uid }}/docker.sock"
- name: "Make sure lingering is enabled for user {{ ansible_env['USER'] }}"
command:
cmd: "loginctl enable-linger {{ ansible_env['USER'] }}"
become: true
- name: "Create directory {{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d to override environment variables"
file:
path: "{{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d"
state: directory
- name: Add environment variables to Docker user service to use slirp4netns RootlessKit port driver, which enables source IP propagation
copy:
dest: "{{ ansible_env['HOME'] }}/.config/systemd/user/docker.service.d/override.conf"
content: |
[Service]
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns"
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns"
- name: Start/restart & enable Docker user service
service:
name: docker
state: restarted
enabled: true

View file

@ -0,0 +1,28 @@
- name: "Create {{ element_project_dir }} project directory"
file:
path: "{{ element_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ element_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Template config.json to project directory
template:
src: config.json
dest: "{{ element_project_dir }}/config.json"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '644'
register: element_template_config_result
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ element_project_dir }}"
pull: "{{ docker_pull_images | bool }}"
# Restart if config file(s) changed
restarted: "{{ element_template_config_result['changed'] | bool }}"

View file

@ -0,0 +1,3 @@
{
"default_server_name": "{{ domain }}"
}

View file

@ -4,6 +4,6 @@ services:
     image: vectorim/element-web:latest
     restart: always
     ports:
-      - 8084:80
+      - {{ ports['element'] }}:80
     volumes:
       - ./config.json:/app/config.json

View file

@ -0,0 +1 @@
element_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,26 @@
- name: "Create {{ etebase_project_dir }} project directory"
file:
path: "{{ etebase_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ etebase_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: "Create directory {{ volumes['etebase_datadir'] }} with correct permissions"
file:
path: "{{ volumes['etebase_datadir'] }}"
state: directory
owner: "{{ users['etebase'] + uid_shift }}"
group: "{{ users['etebase'] + uid_shift }}"
mode: '770'
become: true
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ etebase_project_dir }}"
pull: "{{ docker_pull_images | bool }}"

View file

@ -0,0 +1,15 @@
services:
etebase:
image: victorrds/etebase:alpine
container_name: etebase
restart: always
user: {{ users['etebase'] }}:{{ users['etebase'] }}
environment:
SERVER: http
ALLOWED_HOSTS: etebase.{{ domain }}
AUTO_UPDATE: 'true'
TIME_ZONE: {{ timezone }}
ports:
- {{ ports['etebase'] }}:3735
volumes:
- {{ volumes['etebase_datadir'] }}:/data

View file

@ -0,0 +1 @@
etebase_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,24 @@
- name:
become: true
block:
- name: Install package cifs-utils
apt:
name: cifs-utils
- name: "Template {{ fstab_cifs_credentials_filename }} to /etc/{{ fstab_cifs_credentials_filename }}"
template:
src: "{{ fstab_cifs_credentials_filename }}"
dest: "/etc/{{ fstab_cifs_credentials_filename }}"
owner: root
group: root
mode: '600'
register: fstab_template_cifs_credentials_result
- name: Mount/Remount CIFS devices & edit fstab accordingly
mount:
state: mounted
src: "{{ item.value.src }}"
path: "{{ item.value.path }}"
fstype: cifs
opts: "uid={{ item.value.uid }},gid={{ item.value.gid }},file_mode=0{{ item.value.file_mode }},dir_mode=0{{ item.value.dir_mode }},credentials=/etc/{{ fstab_cifs_credentials_filename }},iocharset=utf8,rw,mfsymlinks,vers=3.0,seal"
loop: "{{ cifs_mounts | dict2items }}"

View file

@ -0,0 +1,2 @@
username={{ cifs_credentials['username'] }}
password={{ cifs_credentials['password'] }}

View file

@ -0,0 +1 @@
fstab_cifs_credentials_filename: storagebox-cifs-credentials.txt

View file

@ -0,0 +1,35 @@
- name: "Create {{ hedgedoc_project_dir }} project directory"
file:
path: "{{ hedgedoc_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ hedgedoc_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: "Create directory {{ volumes['hedgedoc_configdir'] }} with correct permissions"
file:
path: "{{ volumes['hedgedoc_configdir'] }}"
state: directory
owner: "{{ users['hedgedoc'] + uid_shift }}"
group: "{{ users['hedgedoc'] + uid_shift }}"
mode: '770'
become: true
- name: "Create directory {{ volumes['hedgedoc_mysql_datadir'] }} with correct permissions"
file:
path: "{{ volumes['hedgedoc_mysql_datadir'] }}"
state: directory
owner: "{{ users['hedgedoc_mysql'] + uid_shift }}"
group: "{{ users['hedgedoc_mysql'] + uid_shift }}"
mode: '770'
become: true
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ hedgedoc_project_dir }}"
pull: "{{ docker_pull_images | bool }}"

View file

@ -0,0 +1,32 @@
services:
hedgedoc:
container_name: hedgedoc
image: lscr.io/linuxserver/hedgedoc:latest
restart: always
environment:
- PUID={{ users['hedgedoc'] }}
- PGID={{ users['hedgedoc'] }}
- TZ={{ timezone }}
- DB_HOST=hedgedoc-mysql
- DB_PORT=3306
- DB_USER=root
- DB_PASS={{ hedgedoc_secrets['mysql_root_password'] }}
- DB_NAME=hedgedoc
- CMD_DOMAIN=hedgedoc.{{ domain }}
ports:
- {{ ports['hedgedoc'] }}:3000
volumes:
- {{ volumes['hedgedoc_configdir'] }}:/config
mysql:
container_name: hedgedoc-mysql
image: mysql:latest
restart: always
user: {{ users['hedgedoc_mysql'] }}:{{ users['hedgedoc_mysql'] }}
environment:
MYSQL_DATABASE: hedgedoc
MYSQL_ROOT_PASSWORD: "{{ hedgedoc_secrets['mysql_root_password'] }}"
volumes:
- {{ volumes['hedgedoc_mysql_datadir'] }}:/var/lib/mysql

View file

@ -0,0 +1 @@
hedgedoc_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

(Eleven binary image files are part of this commit; each shows identical Before/After dimensions and sizes — 251 KiB, 10 KiB, 978 B, 3.2 KiB, 34 KiB, 230 KiB, 5 KiB, 6.1 KiB, 2 KiB, 3.2 KiB and 576 KiB — so the image data itself appears unchanged, most likely only moved or renamed. Image previews omitted.)

View file

@ -1,4 +1,3 @@
-user nginx;
 pid /tmp/nginx.pid;
 worker_processes auto;

View file

@ -0,0 +1,29 @@
- name: "Create {{ homepage_project_dir }} project directory"
file:
path: "{{ homepage_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ homepage_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Copy nginx.conf and index/ to project directory
copy:
src: "{{ role_path }}/files/"
dest: "{{ homepage_project_dir }}"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '644'
# Store result to restart services if the file(s) changed
register: homepage_copy_files_result
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ homepage_project_dir }}"
pull: "{{ docker_pull_images | bool }}"
# Restart if config file(s) changed
restarted: "{{ homepage_copy_files_result['changed'] | bool }}"

View file

@ -1,11 +1,11 @@
 services:
-  nginx-www:
+  homepage:
     image: nginx:alpine
     restart: always
-    container_name: nginx-www
-    user: '101:101'
+    container_name: homepage
+    user: {{ users['homepage'] }}:{{ users['homepage'] }}
     ports:
-      - 8082:80
+      - {{ ports['homepage'] }}:80
     volumes:
       - ./nginx.conf:/etc/nginx/nginx.conf
       - ./index:/mnt/index

View file

@ -0,0 +1 @@
homepage_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,7 @@
- name: Include vars from env.yml file
include_vars:
file: "{{ playbook_dir }}/../env.yml"
- name: Include secrets from secrets.yml file
include_vars:
file: "{{ playbook_dir }}/../secrets.yml"

View file

@ -0,0 +1,22 @@
- name:
become: true
block:
- name: Install package nftables
apt:
name: nftables
- name: Template nftables.conf to /etc/nftables.conf
template:
src: nftables.conf
dest: /etc/nftables.conf
owner: root
group: root
mode: '755'
register: nftables_template_conf_result
- name: Restart nftables service
service:
name: nftables
state: restarted
enabled: true
when: nftables_template_conf_result['changed']

View file

@ -11,20 +11,24 @@ table inet filter {
 ct state invalid drop
 ct state { established, related } accept
-# HTTP & Syncthing Relay
-tcp dport { http, https, 5432, 22000 } limit rate 5/second accept
-udp dport 22000 limit rate 5/second accept
+# Allow ICMP
+meta l4proto icmp limit rate 1/second accept
+meta l4proto ipv6-icmp limit rate 1/second accept
+# HTTP/S
+tcp dport { http, https } limit rate 5/second accept
 # SSH
 tcp dport 995 limit rate 15/minute accept
-# TURN
-tcp dport { 3478, 5349 } limit rate 5/second accept
-udp dport { 3478, 5349, 49152-49172 } limit rate 5/second accept
+# Syncthing
+tcp dport {{ ports['syncthing_tcp'] }} limit rate 5/second accept
+udp dport {{ ports['syncthing_udp'] }} limit rate 5/second accept
+# Coturn
+tcp dport { {{ ports['coturn_listening'] }}, {{ ports['coturn_tls_listening'] }} } limit rate 5/second accept
+udp dport { {{ ports['coturn_listening'] }}, {{ ports['coturn_tls_listening'] }}, {{ ports['coturn_relay_min'] }}-{{ ports['coturn_relay_max'] }} } limit rate 5/second accept
-# Allow ICMP
-meta l4proto icmp limit rate 1/second accept
-meta l4proto ipv6-icmp limit rate 1/second accept
 }
 chain forward {

View file

@ -0,0 +1,70 @@
- name:
become: true
block:
- name: Install package nginx
apt:
name: nginx
- name: Template nginx.conf to /etc/nginx/nginx.conf
template:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '644'
register: nginx_template_nginx_conf_result
- name: Template reverse-proxy.conf to /etc/nginx/sites-available/reverse-proxy.conf
template:
src: reverse-proxy.conf
dest: /etc/nginx/sites-available/reverse-proxy.conf
owner: root
group: root
mode: '644'
register: nginx_template_reverse_proxy_conf_result
- name: Remove all enabled NGINX sites
file:
state: "{{ item }}"
path: "/etc/nginx/sites-enabled"
owner: root
group: root
mode: '755'
loop:
- absent
- directory
- name: Enable reverse-proxy.conf site
file:
state: link
src: /etc/nginx/sites-available/reverse-proxy.conf
dest: /etc/nginx/sites-enabled/reverse-proxy.conf
- name: Get state of file /etc/nginx/dhparam.txt
stat:
path: /etc/nginx/dhparam.txt
register: nginx_stat_dhparam_result
- name: Download dhparam file from Mozilla
get_url:
url: https://ssl-config.mozilla.org/ffdhe2048.txt
dest: /etc/nginx/dhparam.txt
when: not nginx_stat_dhparam_result.stat.exists
- name: Set correct permissions on certificate directories
file:
path: "/etc/letsencrypt/{{ item }}/{{ domain }}"
state: directory
owner: root
group: root
mode: '750'
loop:
- live
- archive
- name: Start/Reload NGINX service
service:
name: nginx
# Reload if conf changed, if not make sure it is started
state: "{{ (nginx_template_nginx_conf_result['changed'] or nginx_template_reverse_proxy_conf_result['changed']) | ternary('reloaded', 'started') }}"
enabled: yes

View file

@ -32,9 +32,9 @@ http {
 # SSL Settings
 ##
-ssl_certificate /etc/letsencrypt/live/viyurz.fr/fullchain.pem;
-ssl_certificate_key /etc/letsencrypt/live/viyurz.fr/privkey.pem;
-ssl_trusted_certificate /etc/letsencrypt/live/viyurz.fr/chain.pem;
+ssl_certificate {{ reverse_proxy['ssl_certificate_file'] }};
+ssl_certificate_key {{ reverse_proxy['ssl_certificate_key_file'] }};
+ssl_trusted_certificate {{ reverse_proxy['ssl_trusted_certificate_file'] }};
 ssl_protocols TLSv1.2 TLSv1.3;
 ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
@ -61,7 +61,7 @@ http {
 # Headers
 ##
-resolver 185.12.64.12 [a01:4ff:ff00::add:2] [2a01:4ff:ff00::add:1];
+resolver {{ reverse_proxy['resolver'] }};
 add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
 # add_header X-Robots-Tag "noindex, nofollow" always;

View file

@ -21,26 +21,26 @@ server {
 }
-# Base domain / Homepage
+# Homepage
 server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name viyurz.fr;
+server_name {{ domain }};
 location = /.well-known/matrix/server {
 default_type application/json;
-return 200 '{ "m.server": "matrix.viyurz.fr:443" }';
+return 200 '{ "m.server": "matrix.{{ domain }}:443" }';
 }
 location = /.well-known/matrix/client {
 default_type application/json;
 add_header Access-Control-Allow-Origin '*';
-return 200 '{ "m.homeserver": { "base_url": "https://matrix.viyurz.fr" } }';
+return 200 '{ "m.homeserver": { "base_url": "https://matrix.{{ domain }}" } }';
 }
 location / {
-proxy_pass http://localhost:8082;
+proxy_pass http://localhost:{{ ports['homepage'] }};
 }
 }
@ -50,7 +50,7 @@ server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name dl.viyurz.fr;
+server_name dl.{{ domain }};
 root /var/www/html;
 autoindex on;
@ -62,10 +62,10 @@ server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name element.viyurz.fr;
+server_name element.{{ domain }};
 location / {
-proxy_pass http://localhost:8084;
+proxy_pass http://localhost:{{ ports['element'] }};
 add_header X-Frame-Options SAMEORIGIN;
 add_header X-Content-Type-Options nosniff;
@ -83,23 +83,36 @@ server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name etebase.viyurz.fr;
+server_name etebase.{{ domain }};
 location ~ ^/(?!admin) {
-proxy_pass http://localhost:3735;
+proxy_pass http://localhost:{{ ports['etebase'] }};
 }
 }
-# SearxNG
+# Hedgedoc
 server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name searx.viyurz.fr;
+server_name hedgedoc.{{ domain }};
 location / {
-proxy_pass http://localhost:8083;
+proxy_pass http://localhost:{{ ports['hedgedoc'] }};
+}
+}
+# SearXNG
+server {
+listen 443 ssl http2;
+listen [::]:443 ssl http2;
+server_name searx.{{ domain }};
+location / {
+proxy_pass http://localhost:{{ ports['searxng'] }};
 add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
 add_header Set-Cookie "Path=/; HttpOnly; Secure";
@ -113,37 +126,37 @@ server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name matrix.viyurz.fr;
+server_name matrix.{{ domain }};
 location ~ ^(/_matrix|/_synapse/client) {
-proxy_pass http://localhost:8008;
+proxy_pass http://localhost:{{ ports['synapse'] }};
 # Nginx by default only allows file uploads up to 1M in size
 # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
-client_max_body_size 50M;
+client_max_body_size {{ synapse['max_upload_size'] }};
 }
 location / {
-return 308 https://element.viyurz.fr/;
+return 308 https://element.{{ domain }}/;
 }
 }
 # Syncthing Discovery
-upstream stdisco.viyurz.fr {
+upstream stdisco.{{ domain }} {
 # Local IP address:port for discovery server
-server localhost:8443;
+server localhost:{{ ports['syncthing_discosrv'] }};
 }
 server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name stdisco.viyurz.fr;
+server_name stdisco.{{ domain }};
 ssl_verify_client optional_no_ca;
 location / {
-proxy_pass http://stdisco.viyurz.fr;
+proxy_pass http://stdisco.{{ domain }};
 proxy_set_header X-Client-Port $remote_port;
 proxy_set_header X-SSL-Cert $ssl_client_cert;
@ -156,14 +169,14 @@ server {
 # Vaultwarden
 upstream vaultwarden-default {
 zone vaultwarden-default 64k;
-server localhost:8081;
+server localhost:{{ ports['vaultwarden'] }};
 keepalive 2;
 }
 server {
 listen 443 ssl http2;
 listen [::]:443 ssl http2;
-server_name vw.viyurz.fr;
+server_name vw.{{ domain }};
 location / {
 proxy_pass http://vaultwarden-default;

View file

@ -0,0 +1,29 @@
- name: "Create {{ searxng_project_dir }} project directory"
file:
path: "{{ searxng_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ searxng_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Copy settings.yml and limiter.toml to project directory
copy:
src: "{{ role_path }}/files/"
dest: "{{ searxng_project_dir }}"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '644'
# Store result to restart services if the file(s) changed
register: searxng_copy_files_result
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ searxng_project_dir }}"
pull: "{{ docker_pull_images | bool }}"
# Restart if config file(s) changed
restarted: "{{ searxng_copy_files_result['changed'] | bool }}"

View file

@ -3,10 +3,8 @@ services:
     container_name: searxng-redis
     image: docker.io/library/redis:alpine
     restart: always
+    user: {{ users['searxng_redis'] }}:{{ users['searxng_redis'] }}
     command: redis-server --save 30 1 --loglevel warning
-    user: '999:999'
-    networks:
-      - searxng
     volumes:
       - redis:/data
@ -21,12 +19,10 @@ services:
       - SETGID
       - SETUID
     environment:
-      - SEARXNG_BASE_URL=https://searx.viyurz.fr/
+      - SEARXNG_BASE_URL=https://searx.{{ domain }}
-      - SEARXNG_SECRET=${SEARXNG_SECRET}
+      - SEARXNG_SECRET={{ searxng_secrets['searxng_secret'] }}
-    networks:
-      - searxng
     ports:
-      - 8083:8080
+      - {{ ports['searxng'] }}:8080
     volumes:
       - ./settings.yml:/etc/searxng/settings.yml
       - ./limiter.toml:/etc/searxng/limiter.toml
@ -36,10 +32,5 @@ services:
         max-size: "1m"
         max-file: "1"
-networks:
-  searxng:
-    ipam:
-      driver: default
 volumes:
   redis:

View file

@ -0,0 +1 @@
searxng_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,53 @@
- name: "Create {{ synapse_project_dir }} project directory"
file:
path: "{{ synapse_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ synapse_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Template homeserver.yaml to project directory
template:
src: homeserver.yaml
dest: "{{ synapse_project_dir }}/homeserver.yaml"
owner: "{{ ansible_env['USER'] }}"
mode: '640'
# Store result to restart services if the file changed
register: synapse_template_homeserver_result
# Separate task because template module cannot chown/chgrp to a non-existing user/group
- name: "Change group of homeserver.yaml to synapse GID ({{ users['synapse'] + uid_shift }})"
file:
path: "{{ synapse_project_dir }}/homeserver.yaml"
group: "{{ users['synapse'] + uid_shift }}"
become: true
- name: "Create directory {{ volumes['synapse_datadir'] }} with correct permissions"
file:
path: "{{ volumes['synapse_datadir'] }}"
state: directory
owner: "{{ users['synapse'] + uid_shift }}"
group: "{{ users['synapse'] + uid_shift }}"
mode: '770'
become: true
- name: "Create directory {{ volumes['synapse_postgres_datadir'] }} with correct permissions"
file:
path: "{{ volumes['synapse_postgres_datadir'] }}"
state: directory
owner: "{{ users['synapse_postgres'] + uid_shift }}"
group: "{{ users['synapse_postgres'] + uid_shift }}"
mode: '770'
become: true
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ synapse_project_dir }}"
pull: "{{ docker_pull_images | bool }}"
# Restart if config file(s) changed
restarted: "{{ synapse_template_homeserver_result['changed'] | bool }}"

View file

@ -0,0 +1,27 @@
services:
postgres:
container_name: synapse-postgres
image: postgres:alpine
restart: always
user: {{ users['synapse_postgres'] }}:{{ users['synapse_postgres'] }}
environment:
LANG: C
POSTGRES_INITDB_ARGS: "--locale=C --encoding=UTF8"
POSTGRES_USER: {{ synapse_secrets['postgres_user'] }}
POSTGRES_PASSWORD: {{ synapse_secrets['postgres_password'] }}
volumes:
- {{ volumes['synapse_postgres_datadir'] }}:/var/lib/postgresql/data
synapse:
container_name: synapse
image: matrixdotorg/synapse:latest
restart: always
environment:
UID: {{ users['synapse'] }}
GID: {{ users['synapse'] }}
TZ: {{ timezone }}
ports:
- {{ ports['synapse'] }}:8008
volumes:
- {{ volumes['synapse_datadir'] }}:/data
- ./homeserver.yaml:/data/homeserver.yaml

View file

@ -4,7 +4,7 @@
 # Server
-server_name: "viyurz.fr"
+server_name: "{{ domain }}"
 pid_file: /data/homeserver.pid
 listeners:
   - port: 8008
@ -24,8 +24,8 @@ max_avatar_size: 2M
 database:
   name: psycopg2
   args:
-    user: synapse
-    password: synapse
+    user: {{ synapse_secrets['postgres_user'] }}
+    password: {{ synapse_secrets['postgres_password'] }}
     dbname: synapse
     host: synapse-postgres
     cp_min: 5
@ -33,13 +33,13 @@ database:
 # Logging
-log_config: "/data/viyurz.fr.log.config"
+log_config: "/data/{{ domain }}.log.config"
 # Media Store
 media_store_path: /data/media_store
 # Also change max_body_size in the reverse proxy accordingly
-max_upload_size: 50M
+max_upload_size: {{ synapse['max_upload_size'] }}
 media_retention:
   remote_media_lifetime: 14d
 url_preview_enabled: true
@ -70,7 +70,8 @@ url_preview_accept_language:
 # TURN
-turn_uris: ["turns:turn.viyurz.fr?transport=udp", "turns:turn.viyurz.fr?transport=tcp"]
+turn_uris: ["turns:turn.{{ domain }}?transport=udp", "turns:turn.{{ domain }}?transport=tcp"]
+turn_shared_secret: "{{ synapse_secrets['turn_shared_secret'] }}"
 turn_user_lifetime: 86400000
 turn_allow_guests: true
@ -84,8 +85,13 @@ registration_requires_token: true
 report_stats: true
+# API Configuration
+macaroon_secret_key: "{{ synapse_secrets['macaroon_secret_key'] }}"
+form_secret: "{{ synapse_secrets['form_secret'] }}"
 # Signing Keys
-signing_key_path: "/data/viyurz.fr.signing.key"
+signing_key_path: "/data/{{ domain }}.signing.key"
 trusted_key_servers:
   - server_name: "matrix.org"
     suppress_key_server_warning: true

View file

@ -0,0 +1 @@
synapse_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,17 @@
- name: "Create {{ syncthing_project_dir }} project directory"
file:
path: "{{ syncthing_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ syncthing_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ syncthing_project_dir }}"
pull: "{{ docker_pull_images | bool }}"

View file

@ -0,0 +1,35 @@
services:
syncthing:
image: syncthing/syncthing:latest
container_name: syncthing
restart: always
user: {{ users['syncthing'] }}:{{ users['syncthing'] }}
environment:
- PUID={{ users['syncthing'] }}
- PGID={{ users['syncthing'] }}
ports:
- {{ ports['syncthing_webui'] }}:8384 # Web UI
- {{ ports['syncthing_tcp'] }}:22000/tcp # TCP file transfers
- {{ ports['syncthing_udp'] }}:22000/udp # QUIC file transfers
volumes:
- {{ volumes['syncthing_datadir'] }}:/var/syncthing
stdiscosrv:
image: syncthing/discosrv:latest
container_name: syncthing-discosrv
restart: always
entrypoint:
- "/bin/entrypoint.sh"
- "/bin/stdiscosrv"
- "-http"
- "-debug"
environment:
- PUID={{ users['syncthing_discosrv'] }}
- PGID={{ users['syncthing_discosrv'] }}
networks:
- discosrv
ports:
- {{ ports['syncthing_discosrv'] }}:8443
networks:
discosrv:

View file

@ -0,0 +1 @@
syncthing_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -0,0 +1,26 @@
- name: "Create {{ vaultwarden_project_dir }} project directory"
file:
path: "{{ vaultwarden_project_dir }}"
state: directory
- name: Template docker-compose.yaml to project directory
template:
src: docker-compose.yaml
dest: "{{ vaultwarden_project_dir }}/docker-compose.yaml"
owner: "{{ ansible_env['USER'] }}"
group: "{{ ansible_env['USER'] }}"
mode: '640'
- name: "Create directory {{ volumes['vaultwarden_datadir'] }} with correct permissions"
file:
path: "{{ volumes['vaultwarden_datadir'] }}"
state: directory
owner: "{{ users['vaultwarden'] + uid_shift }}"
group: "{{ users['vaultwarden'] + uid_shift }}"
mode: '770'
become: true
- name: Pull/Create/Restart project services
community.docker.docker_compose:
project_src: "{{ vaultwarden_project_dir }}"
pull: "{{ docker_pull_images | bool }}"

View file

@ -3,12 +3,12 @@ services:
     image: vaultwarden/server:alpine
     container_name: vaultwarden
     restart: always
-    user: '1000:1000'
+    user: {{ users['vaultwarden'] }}:{{ users['vaultwarden'] }}
     environment:
-      - DOMAIN=https://vw.viyurz.fr  # Your domain; vaultwarden needs to know it's https to work properly with attachments
+      - DOMAIN=https://vw.{{ domain }}
       - SIGNUPS_ALLOWED=false
       - INVITATIONS_ALLOWED=false
     ports:
-      - 8081:80
+      - {{ ports['vaultwarden'] }}:80
     volumes:
-      - /mnt/vwdata:/data
+      - {{ volumes['vaultwarden_datadir'] }}:/data

View file

@ -0,0 +1 @@
vaultwarden_project_dir: "{{ docker_projects_dir }}/{{ role_name }}"

View file

@ -1,4 +0,0 @@
# Generate secret with:
# $ cat /dev/urandom | tr -dc 'a-z-A-Z-0-9' | head -c 50
SEARXNG_SECRET=XXX

22
secrets.yml.example Normal file
View file

@ -0,0 +1,22 @@
ansible_become_password:
cifs_credentials:
username:
password:
# To generate random secret: openssl rand -base64 50
coturn_secrets:
static_auth_secret:
hedgedoc_secrets:
mysql_root_password:
searxng_secrets:
searxng_secret:
synapse_secrets:
postgres_user:
postgres_password:
turn_shared_secret: "{{ coturn_secrets['static_auth_secret'] }}"
macaroon_secret_key:
form_secret:

View file

@ -1,26 +0,0 @@
#!/bin/bash
sudo apt install -y uidmap slirp4netns rootlesskit
if ! grep -q '/usr/share/docker.io/contrib' "$HOME/.profile" > /dev/null; then
echo 'export PATH="/usr/share/docker.io/contrib:$PATH"' >> "$HOME/.profile"
fi
if ! grep -q 'DOCKER_HOST' "$HOME/.profile" > /dev/null; then
echo "export DOCKER_HOST=unix:///run/user/$(id -u)/docker.sock" >> "$HOME/.profile"
fi
sudo loginctl enable-linger "$USER"
PATH="/usr/share/docker.io/contrib:$PATH" dockerd-rootless-setuptool.sh install
mkdir -p "$HOME/.config/systemd/user/docker.service.d"
cat << EOF > "$HOME/.config/systemd/user/docker.service.d/override.conf"
[Service]
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_NET=slirp4netns"
Environment="DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=slirp4netns"
EOF
systemctl --user enable docker
systemctl --user restart docker

View file

@ -1,44 +0,0 @@
services:
postgres:
container_name: synapse-postgres
image: postgres:alpine
restart: always
user: '70:70'
environment:
LANG: C
POSTGRES_INITDB_ARGS: "--locale=C --encoding=UTF8"
POSTGRES_USER: synapse
POSTGRES_PASSWORD: synapse
networks:
- synapse
volumes:
- /mnt/synapsepgdata:/var/lib/postgresql/data
synapse:
container_name: synapse
image: matrixdotorg/synapse:latest
# command: generate
restart: always
user: '991:991'
command: >
run
--config-path=/data/homeserver.yaml
--config-path=/data/secrets.yaml
environment:
SYNAPSE_SERVER_NAME: viyurz.fr
SYNAPSE_REPORT_STATS: "yes"
SYNAPSE_HTTP_PORT: 8008
TZ: "Europe/Paris"
networks:
- synapse
ports:
- 8008:8008
volumes:
- /mnt/synapsedata:/data
- ./homeserver.yaml:/data/homeserver.yaml
- ./secrets.yaml:/data/secrets.yaml
networks:
synapse:
ipam:
driver: default

View file

@ -1,9 +0,0 @@
#!/bin/bash
rel_path="$(dirname "$0")"
access_token="$(cat $rel_path/./matrix_access_token.txt)"
curl --header "Authorization: Bearer $access_token" \
-H "Content-Type: application/json" -w "\n" \
-X POST http://localhost:8008/_synapse/admin/v1/registration_tokens/new \
-d '{ "uses_allowed": 1, "expiry_time": '"$(date +%s000 -d tomorrow)"' }'

View file

@ -1 +0,0 @@
syt_XXX

View file

@ -1,6 +0,0 @@
# Generate random secret:
# $ cat /dev/urandom | tr -dc '[:graph:]' | tr -d '"\\' | head -c 50
turn_shared_secret: "XXX"
macaroon_secret_key: "XXX"
form_secret: "XXX"

View file

@ -1,36 +0,0 @@
services:
syncthing:
image: syncthing/syncthing:latest
container_name: syncthing
restart: always
user: '1001:1001'
environment:
- PUID=1001
- PGID=1001
ports:
- 8384:8384 # Web UI
- 22000:22000/tcp # TCP file transfers
- 22000:22000/udp # QUIC file transfers
- 5432:22000/tcp
volumes:
- /mnt/syncthing:/var/syncthing
stdiscosrv:
image: syncthing/discosrv:latest
container_name: stdiscosrv
restart: always
entrypoint:
- "/bin/entrypoint.sh"
- "/bin/stdiscosrv"
- "-http"
- "-debug"
environment:
- PUID=1002
- PGID=1002
networks:
- disco
ports:
- 8443:8443
networks:
disco:

View file

@ -1,81 +0,0 @@
#!/bin/bash
services=(coturn element etebase nginx-www searxng synapse syncthing vw)
needs_backup=(etebase synapse vw)
rel_path="$(dirname "$0")"
function pull {
docker-compose -f "$rel_path/$1/docker-compose.yaml" pull
}
# Runs compose up & eventually make a backup before
# $1 = project name
function up {
if echo "${needs_backup[*]}" | grep -qP "\b$1\b" && has_update "$1"; then
sudo "$rel_path/backup.sh" "$1" --norestart
fi
docker-compose -f "$rel_path/$1/docker-compose.yaml" up -d
}
# To use after pulling latest images of project
# Checks if at least one container can be updated
# $1 = project name
function has_update {
readarray -t cont_list < <(docker-compose -f "$rel_path/$1/docker-compose.yaml" ps -a | tail -n+3 | cut -d ' ' -f 1)
for cont in "${cont_list[@]}"; do
# Return true if container doesn't exist
if ! docker ps -a --format='{{.Names}}' | grep -q "$cont"; then
return 0
fi
cont_image_id="$(docker inspect "$cont" --format='{{.Image}}')"
repo_url="$(docker inspect "$cont" --format='{{.Config.Image}}')"
repo_image_id="$(docker image inspect "$repo_url" --format='{{.Id}}')"
if [[ "$cont_image_id" != "$repo_image_id" ]]; then
return 0
fi
done
return 1
}
service="$(echo "$2" | sed -E 's/[/ ]//g')"
case "$1" in
pull)
if [[ -z "$service" ]]; then
for serv in "${services[@]}"; do
pull "$serv"
done
elif echo "${services[*]}" | grep -qP "\b$service\b"; then
pull "$service"
else
echo "invalid project name. it should be one of: ${services[*]}."
fi
;;
up)
if [[ -z "$service" ]]; then
for serv in "${services[@]}"; do
pull "$serv"
up "$serv"
done
elif echo "${services[*]}" | grep -qP "\b$service\b"; then
pull "$service"
up "$service"
else
echo "Invalid project name. It should be one of: ${services[*]}."
fi
;;
*)
echo "Invalid action. It should be one of: pull, up."
;;
esac