Migration to Podman & Python script, start
This commit is contained in:
parent
9094da305c
commit
569ce40d86
10 changed files with 521 additions and 0 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -1 +1,3 @@
|
|||
secrets.yml
|
||||
pysecrets.yml
|
||||
*.rendered
|
||||
|
|
193
manage.py
Executable file
193
manage.py
Executable file
|
@ -0,0 +1,193 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
import os, sys, re
|
||||
from glob import glob
|
||||
from mako.template import Template
|
||||
import subprocess
|
||||
import yaml
|
||||
|
||||
|
||||
def backupProj(project):
    """Back up the given project's data (implementation pending)."""
    print(f"Running backup for project {project}.")
    # TODO: iterate env.volumes & secrets.postgres to perform the actual backup
||||
def getImageId(image):
    """Return the local ID of *image* via `podman image inspect`.

    Yields an empty string when inspect produces no output (e.g. the
    image is not present locally).
    """
    result = subprocess.run(
        ["podman", "image", "inspect", "--format", "{{.Id}}", image],
        capture_output=True,
        text=True,
    )
    return result.stdout.strip()
||||
def getUid(service):
    """Map a service's configured in-container UID to its host-side UID."""
    container_uid = env['users'][service]
    return container_uid + env['uid_shift']
||||
def pullProj(project):
    """Pull every image referenced by the project's rendered compose file.

    Returns the list of image names for which a new version was
    actually downloaded (image ID changed across the pull).
    """
    print(f"Pulling images for project {project}.")

    with open(f"projects/{project}/compose.yaml.rendered", 'r') as composefile:
        images = re.findall('(?<=image:\\s).+', composefile.read())

    pulledImages = []
    for image in images:
        currentId = getImageId(image)
        subprocess.run(["podman", "pull", image])
        pulledId = getImageId(image)
        if currentId != pulledId:
            # Bug fix: `pulledImages += image` extended the list with the
            # string's individual characters; append the image name instead.
            pulledImages.append(image)
            print(f"Pulled new version of image {image}.")
        else:
            print(f"No update available for image {image}.")

    return pulledImages
||||
def renderFile(templateFile):
    """Render a Mako template to its `.rendered` twin with 0640 permissions.

    Uses the module-global `env` and `secrets` mappings as template context.
    """
    print(f"Rendering file {templateFile}.")

    renderedFile = re.sub('\\.mako$', '.rendered', templateFile)

    template = Template(filename=templateFile)

    # Bug fix: O_TRUNC added.  Without it, re-rendering shorter content
    # over an existing file left stale bytes from the previous render at
    # the end of the file.
    descriptor = os.open(renderedFile, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o640)

    # Context manager guarantees the descriptor is closed even if
    # template rendering raises.
    with open(descriptor, "w") as outputFile:
        outputFile.write(template.render(env=env, secrets=secrets))
||||
def setCertPerms(service):
    """Grant *service* read access to its Let's Encrypt certificate files."""
    # Top-level letsencrypt directories: root-owned, traversable.
    base_dirs = ["/etc/letsencrypt", "/etc/letsencrypt/live", "/etc/letsencrypt/archive"]
    for path in base_dirs:
        setOwner(path, 0, 0)
        setPerms(path, 751)

    pkeyFile = env['certs'][service]['pkey']

    # Directory containing the key under live/, plus its archive/ counterpart.
    domain_dir = re.search('.+(?=\\/.+$)', pkeyFile).group(0)
    domain_dirs = [domain_dir, re.sub('live', 'archive', domain_dir)]
    for path in domain_dirs:
        setOwner(path, env['host_uid'], getUid(service))
        setPerms(path, 550)

    # The private key itself: root-owned, group-readable by the service user.
    setOwner(pkeyFile, 0, getUid(service))
    setPerms(pkeyFile, 640)
||||
def setOwner(path, uid=None, gid=None):
    """Ensure *path* is owned by uid:gid, invoking `sudo chown` when needed.

    A uid/gid of None keeps the current owner/group.  Bug fix: the
    previous `if not uid:` test also treated 0 (root) as "keep current",
    so callers such as setOwner(path, 0, 0) could never hand ownership
    to root; explicit None checks fix that.
    """
    stat = os.stat(path)
    if uid is None:
        uid = stat.st_uid
    if gid is None:
        gid = stat.st_gid

    if stat.st_uid != uid or stat.st_gid != gid:
        print(f"Changing ownership of {path} to {uid}:{gid} from {stat.st_uid}:{stat.st_gid}.")
        child = subprocess.Popen(["sudo", "chown", f"{uid}:{gid}", path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        # NOTE(review): reading stdout before communicate() can block, and sudo
        # usually prompts on the terminal/stderr — confirm this interaction works.
        if re.search('^\\[sudo\\] password for .+', child.stdout.read().strip()):
            # Bug fix: Popen was created with text=True, so communicate()
            # expects str; the original passed input().encode() (bytes),
            # which raises TypeError.
            child.communicate(input=input())
    else:
        print(f"Ownership of {path} already set to {uid}:{gid}.")
||||
def setPerms(path, mode):
    """Ensure *path* has permission bits *mode* (e.g. 640), via sudo if not ours."""
    mode = str(mode)
    info = os.stat(path)
    curMode = oct(info.st_mode)[-3:]
    if mode == curMode:
        print(f"Permissions of {path} already set to {mode}.")
        return

    print(f"Changing permissions of {path} to {mode} from {curMode}.")
    # Escalate with sudo only when the file is not owned by the host user.
    if info.st_uid == env['host_uid']:
        cmd = ["chmod", mode, path]
    else:
        cmd = ["sudo", "chmod", mode, path]
    subprocess.run(cmd)
||||
def setupProj(project):
    """Render templates, fix cert permissions, back up, and start a project."""
    print(f"Running setup for project {project}.")

    backupProj(project)

    # Certificate permission fixes only apply to services that have TLS certs.
    if project in env['certs']:
        setCertPerms(project)

    for templateFile in glob(f"projects/{project}/*.mako"):
        renderFile(templateFile)
        renderedFile = re.sub('\\.mako$', '.rendered', templateFile)
        setOwner(renderedFile, env['host_uid'], getUid(project))

    upProj(project)
||||
def upProj(project):
    """(Re)create and start the project's compose stack."""
    print(f"Creating & starting stack for project {project}.")
    composePath = f"projects/{project}/compose.yaml.rendered"
    # Tear down first when a container named after the project already exists.
    exists = subprocess.run(["podman", "container", "exists", project]).returncode == 0
    if exists:
        subprocess.run(["podman-compose", "-f", composePath, "down"])
    subprocess.run(["podman-compose", "-f", composePath, "up", "-d"])
||||
def updateProj(project):
    """Update a project: pull images and, if any changed, back up & restart."""
    renderedCompose = f"projects/{project}/compose.yaml.rendered"
    # A missing rendered compose file means the project was never set up.
    if not os.path.isfile(renderedCompose):
        setupProj(project)

    print(f"Running update for project {project}.")

    if pullProj(project):
        backupProj(project)
        upProj(project)
||||
def main():
|
||||
envFile = "pyenv.yml"
|
||||
secretsFile = "pysecrets.yml"
|
||||
|
||||
os.chdir(os.path.realpath(sys.path[0]))
|
||||
|
||||
with open(envFile, 'r') as envfile, open(secretsFile, 'r') as secretsfile:
|
||||
global env, secrets
|
||||
env = yaml.safe_load(envfile)
|
||||
env = yaml.safe_load(Template(filename=envFile).render(env=env))
|
||||
secrets = yaml.safe_load(secretsfile)
|
||||
|
||||
setOwner(secretsFile, env['host_uid'], env['host_uid'])
|
||||
setPerms(secretsFile, 600)
|
||||
|
||||
print("\nChoose action:")
|
||||
print("[1/S] Setup project")
|
||||
print("[2/U] Update project")
|
||||
print("[3/B] Backup project")
|
||||
|
||||
action = ''
|
||||
while action == '':
|
||||
action = input("Action: ")
|
||||
|
||||
projects = os.listdir("projects")
|
||||
print(f"\nProjects list: {projects}")
|
||||
|
||||
target_projects = input("Target compose project(s), space separated, leave empty to target all: ")
|
||||
|
||||
if target_projects == '':
|
||||
target_projects = projects
|
||||
else:
|
||||
target_projects = target_projects.split(' ')
|
||||
print(f"Target projects: {target_projects}")
|
||||
|
||||
match action:
|
||||
case '1' | 'S':
|
||||
for project in target_projects:
|
||||
try:
|
||||
print()
|
||||
setupProj(project)
|
||||
except Exception as e:
|
||||
print(e, file=sys.stderr)
|
||||
print(f"Failed to setup project {project}.", file=sys.stderr)
|
||||
|
||||
case '2' | 'U':
|
||||
for project in target_projects:
|
||||
try:
|
||||
print()
|
||||
updateProj(project)
|
||||
except Exception as e:
|
||||
print(e, file=sys.stderr)
|
||||
print(f"Failed to update project {project}.", file=sys.stderr)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
9
podman.service
Normal file
9
podman.service
Normal file
|
@ -0,0 +1,9 @@
|
|||
[Unit]
Description=Podman User Service

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/bin/podman start --all
ExecStop=/usr/bin/podman stop --all
# Bug fix: Restart= takes a policy keyword (no/always/on-failure/...), not a
# command line — systemd rejects "Restart=/usr/bin/podman restart --all".
# ExecReload= is the directive for running a command on reload.
ExecReload=/usr/bin/podman restart --all
|
20
projects/coturn/compose.yaml.mako
Normal file
20
projects/coturn/compose.yaml.mako
Normal file
|
@ -0,0 +1,20 @@
|
|||
# Compose stack for the coturn TURN/STUN server (Mako template;
# ${...} placeholders are filled in from pyenv.yml by manage.py).
services:
  coturn:
    container_name: coturn
    image: docker.io/coturn/coturn:4-alpine
    # pasta user-mode networking (provided by the passt package) — TODO confirm
    network_mode: pasta
    restart: always
    user: ${env['users']['coturn']}
    # Listening ports on TCP+UDP, TLS ports, and the UDP relay port range.
    ports:
      - ${env['ports']['coturn_listening']}:${env['ports']['coturn_listening']}
      - ${env['ports']['coturn_listening']}:${env['ports']['coturn_listening']}/udp
      - ${env['ports']['coturn_tls_listening']}:${env['ports']['coturn_tls_listening']}
      - ${env['ports']['coturn_tls_listening']}:${env['ports']['coturn_tls_listening']}/udp
      - ${env['ports']['coturn_relay_min']}-${env['ports']['coturn_relay_max']}:${env['ports']['coturn_relay_min']}-${env['ports']['coturn_relay_max']}/udp
    tmpfs:
      - /var/lib/coturn
    volumes:
      # Config rendered from turnserver.conf.mako by manage.py.
      - ./turnserver.conf.rendered:/etc/coturn/turnserver.conf:ro
      # Let's Encrypt certificate & private key, mounted read-only.
      - ${env['certs']['coturn']['cert']}:/etc/coturn/cert.pem:ro
      - ${env['certs']['coturn']['pkey']}:/etc/coturn/pkey.pem:ro
68
projects/coturn/turnserver.conf.mako
Normal file
68
projects/coturn/turnserver.conf.mako
Normal file
|
@ -0,0 +1,68 @@
|
|||
listening-port=${env['ports']['coturn_listening']}
|
||||
tls-listening-port=${env['ports']['coturn_tls_listening']}
|
||||
|
||||
# Lower and upper bounds of the UDP relay endpoints:
|
||||
# (default values are 49152 and 65535)
|
||||
min-port=${env['ports']['coturn_relay_min']}
|
||||
max-port=${env['ports']['coturn_relay_max']}
|
||||
|
||||
#verbose
|
||||
fingerprint
|
||||
|
||||
# Credentials in secrets.conf (static-auth-secret)
|
||||
use-auth-secret
|
||||
static-auth-secret=${secrets['turn_static_auth_secret']}
|
||||
|
||||
realm=turn.${env['domain']}
|
||||
|
||||
# TLS certificates, including intermediate certs.
|
||||
# For Let's Encrypt certificates, use `fullchain.pem` here.
|
||||
cert=/etc/coturn/cert.pem
|
||||
|
||||
# TLS private key file
|
||||
pkey=/etc/coturn/pkey.pem
|
||||
|
||||
# Do not allow an TLS/DTLS version of protocol
|
||||
no-tlsv1
|
||||
no-tlsv1_1
|
||||
|
||||
# Disable RFC5780 (NAT behavior discovery).
|
||||
no-rfc5780
|
||||
no-stun-backward-compatibility
|
||||
response-origin-only-with-rfc5780
|
||||
no-cli
|
||||
|
||||
# VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
|
||||
no-tcp-relay
|
||||
|
||||
# consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
|
||||
user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
|
||||
total-quota=1200
|
||||
|
||||
# don't let the relay ever try to connect to private IP address ranges within your network (if any)
|
||||
# given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
|
||||
denied-peer-ip=10.0.0.0-10.255.255.255
|
||||
denied-peer-ip=192.168.0.0-192.168.255.255
|
||||
denied-peer-ip=172.16.0.0-172.31.255.255
|
||||
# recommended additional local peers to block, to mitigate external access to internal services.
|
||||
# https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
|
||||
no-multicast-peers
|
||||
denied-peer-ip=0.0.0.0-0.255.255.255
|
||||
denied-peer-ip=100.64.0.0-100.127.255.255
|
||||
denied-peer-ip=127.0.0.0-127.255.255.255
|
||||
denied-peer-ip=169.254.0.0-169.254.255.255
|
||||
denied-peer-ip=192.0.0.0-192.0.0.255
|
||||
denied-peer-ip=192.0.2.0-192.0.2.255
|
||||
denied-peer-ip=192.88.99.0-192.88.99.255
|
||||
denied-peer-ip=198.18.0.0-198.19.255.255
|
||||
denied-peer-ip=198.51.100.0-198.51.100.255
|
||||
denied-peer-ip=203.0.113.0-203.0.113.255
|
||||
denied-peer-ip=240.0.0.0-255.255.255.255
|
||||
denied-peer-ip=::1
|
||||
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
|
||||
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
|
||||
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
|
||||
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
|
||||
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
|
||||
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
|
||||
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
|
10
projects/homepage/compose.yaml.mako
Normal file
10
projects/homepage/compose.yaml.mako
Normal file
|
@ -0,0 +1,10 @@
|
|||
# Compose stack for the homepage service (Mako template;
# ${...} placeholders are filled in from pyenv.yml by manage.py).
services:
  homepage:
    container_name: homepage
    image: git.ahur.ac/viyurz/homepage:latest
    restart: always
    user: ${env['users']['homepage']}
    # Published on loopback only — presumably fronted by a reverse proxy.
    ports:
      - 127.0.0.1:${env['ports']['homepage']}:8686
    volumes:
      - ./services.toml:/etc/homepage/services.toml:ro
|
56
projects/homepage/services.toml
Normal file
56
projects/homepage/services.toml
Normal file
|
@ -0,0 +1,56 @@
|
|||
[[services]]
|
||||
name = "Element"
|
||||
description = "Web client of Element, an instant messaging client implementing the Matrix protocol."
|
||||
domain = "element.viyurz.fr"
|
||||
language = "TypeScript"
|
||||
repository_url = "https://github.com/element-hq/element-web"
|
||||
|
||||
[[services]]
|
||||
name = "EteBase"
|
||||
description = "Server for EteSync, an end-to-end encrypted contacts, calendars, tasks and notes provider."
|
||||
domain = "etebase.viyurz.fr"
|
||||
language = "Python"
|
||||
repository_url = "https://github.com/etesync/server"
|
||||
|
||||
[[services]]
|
||||
name = "HedgeDoc"
|
||||
description = "A real-time collaborative markdown editor."
|
||||
domain = "hedgedoc.viyurz.fr"
|
||||
language = "TypeScript"
|
||||
repository_url = "https://github.com/hedgedoc/hedgedoc"
|
||||
|
||||
[[services]]
|
||||
name = "Matrix"
|
||||
description = "Synapse homeserver implemeting the Matrix protocol, an open standard for real-time communication supporting encryption and VoIP."
|
||||
domain = "matrix.viyurz.fr"
|
||||
language = "Python"
|
||||
repository_url = "https://github.com/element-hq/synapse"
|
||||
|
||||
[[services]]
|
||||
name = "SearXNG"
|
||||
description = "A privacy-respecting, hackable metasearch engine."
|
||||
domain = "searx.viyurz.fr"
|
||||
language = "Python"
|
||||
repository_url = "https://github.com/searxng/searxng"
|
||||
|
||||
[[services]]
|
||||
name = "Stalwart Mail Server"
|
||||
description = "Secure & Modern All-in-One Mail Server (IMAP, JMAP, SMTP)."
|
||||
domain = "mail.viyurz.fr"
|
||||
language = "Rust"
|
||||
repository_url = "https://github.com/stalwartlabs/mail-server"
|
||||
|
||||
[[services]]
|
||||
name = "Stump"
|
||||
description = "A comics, manga and digital book server with OPDS support."
|
||||
domain = "stump.viyurz.fr"
|
||||
language = "Rust / TypeScript"
|
||||
repository_url = "https://github.com/stumpapp/stump"
|
||||
|
||||
[[services]]
|
||||
name = "Vaultwarden"
|
||||
description = "Rust rewrite of the Bitwarden server, a password management service."
|
||||
domain = "vw.viyurz.fr"
|
||||
language = "Rust"
|
||||
repository_url = "https://github.com/dani-garcia/vaultwarden"
|
||||
|
122
pyenv.yml
Normal file
122
pyenv.yml
Normal file
|
@ -0,0 +1,122 @@
|
|||
domain: viyurz.fr
|
||||
timezone: "Europe/Paris"
|
||||
host_uid: 1000
|
||||
|
||||
# UID shift for mapping between host & containers
|
||||
uid_shift: 99999
|
||||
|
||||
|
||||
# cifs_credentials is undefined when the backup playbook runs
# as a cronjob, so we set an empty default value to prevent errors;
# this is fine because the value is not used in that case.
|
||||
cifs_host: "{{ cifs_credentials['username'] | default('') }}.your-storagebox.de"
|
||||
|
||||
cifs_mounts:
|
||||
backups:
|
||||
src: "//{{ cifs_host }}/backup/backups"
|
||||
path: /mnt/storagebox/backups
|
||||
uid: 0
|
||||
gid: "{{ host_uid }}"
|
||||
file_mode: 640
|
||||
dir_mode: 750
|
||||
fireshare:
|
||||
src: "//{{ cifs_host }}/backup/fireshare"
|
||||
path: /mnt/storagebox/fireshare
|
||||
uid: "{{ users['fireshare'] + uid_shift }}"
|
||||
gid: "{{ users['fireshare'] + uid_shift }}"
|
||||
file_mode: 644
|
||||
dir_mode: 755
|
||||
storagebox:
|
||||
src: "//{{ cifs_host }}/backup"
|
||||
path: /mnt/storagebox
|
||||
uid: 0
|
||||
gid: 0
|
||||
file_mode: 640
|
||||
dir_mode: 751
|
||||
syncthing:
|
||||
src: "//{{ cifs_host }}/backup/syncthing"
|
||||
path: /mnt/storagebox/syncthing
|
||||
uid: "{{ users['syncthing'] + uid_shift }}"
|
||||
gid: "{{ users['syncthing'] + uid_shift }}"
|
||||
file_mode: 640
|
||||
dir_mode: 750
|
||||
|
||||
|
||||
borg_repodir: "${env['cifs_mounts']['backups']['path']}/borg"
|
||||
borg_passphrase_file: /etc/borg-passphrase.txt
|
||||
|
||||
|
||||
certs:
|
||||
coturn:
|
||||
cert: "/etc/letsencrypt/live/turn.${env['domain']}/fullchain.pem"
|
||||
pkey: "/etc/letsencrypt/live/turn.${env['domain']}/privkey.pem"
|
||||
mailserver:
|
||||
cert: "/etc/letsencrypt/live/mail.${env['domain']}/fullchain.pem"
|
||||
pkey: "/etc/letsencrypt/live/mail.${env['domain']}/privkey.pem"
|
||||
|
||||
|
||||
# Ports exposed to host
|
||||
ports:
|
||||
coturn_listening: 3478
|
||||
coturn_tls_listening: 5349
|
||||
coturn_relay_min: 49152
|
||||
coturn_relay_max: 49172
|
||||
element: 8084
|
||||
etebase: 3735
|
||||
fireshare: 8085
|
||||
hedgedoc: 8086
|
||||
homepage: 8686
|
||||
keycloak: 8444
|
||||
mailserver_smtp: 1025
|
||||
mailserver_smtps: 1465
|
||||
mailserver_imaps: 1993
|
||||
mailserver_https: 1443
|
||||
postgres: 5432
|
||||
searxng: 8083
|
||||
stump: 10801
|
||||
synapse: 8008
|
||||
syncthing_discosrv: 8443
|
||||
# Public port, forwarded to 22067 by nftables
|
||||
syncthing_relaysrv: 143
|
||||
syncthing_webui: 8384
|
||||
syncthing_tcp: 18880
|
||||
syncthing_udp: 22000
|
||||
uptime_kuma: 3001
|
||||
vaultwarden: 8081
|
||||
|
||||
|
||||
# UID in containers
|
||||
users:
|
||||
coturn: 666
|
||||
etebase: 373
|
||||
fireshare: 1007
|
||||
hedgedoc: 1004
|
||||
homepage: 8686
|
||||
keycloak: 1000
|
||||
mailserver: 8
|
||||
postgres: 70
|
||||
searxng: 977
|
||||
searxng_valkey: 999
|
||||
stump: 1005
|
||||
synapse: 991
|
||||
syncthing: 1001
|
||||
syncthing_discosrv: 1002
|
||||
syncthing_relaysrv: 1003
|
||||
uptime_kuma: 1006
|
||||
vaultwarden: 1010
|
||||
|
||||
|
||||
volumes:
|
||||
etebase_datadir: /mnt/etebasedata
|
||||
fireshare_datadir: /mnt/firesharedata
|
||||
fireshare_processeddir: /mnt/storagebox/fireshare/processed
|
||||
fireshare_videosdir: /mnt/storagebox/fireshare/videos
|
||||
hedgedoc_uploadsdir: /mnt/hedgedocuploads
|
||||
mailserver_datadir: /mnt/mailserver
|
||||
postgres_datadir: /mnt/postgresdata
|
||||
stump_configdir: /mnt/stump/config
|
||||
stump_datadir: /mnt/stump/data
|
||||
synapse_datadir: /mnt/synapsedata
|
||||
syncthing_datadir: "${env['cifs_mounts']['syncthing']['path']}"
|
||||
uptime_kuma_datadir: /mnt/uptimekumadata
|
||||
vaultwarden_datadir: /mnt/vwdata
|
1
pysecrets.yml.example
Normal file
1
pysecrets.yml.example
Normal file
|
@ -0,0 +1 @@
|
|||
turn_static_auth_secret: 5c10e2531f8ef56ccca462966d00383f0184188b1cc8dd5f85e98152bdd9278a45981654d1ff7813e7c5
|
40
setup-podman.sh
Executable file
40
setup-podman.sh
Executable file
|
@ -0,0 +1,40 @@
|
|||
#!/usr/bin/bash
# One-time host setup for rootless Podman: packages, unit management,
# lingering, sysctl tuning, and the user-level podman.service.

if [[ $(whoami) == "root" ]]; then
	echo "Do not run this script as root."
	exit
fi


sudo apt install -y aardvark-dns dbus-user-session passt podman podman-compose uidmap


# Rootless setup: the system-wide podman units must not run.
for unit in podman.service podman.socket podman-auto-update.service podman-auto-update.timer podman-clean-transient.service podman-restart.service; do
	sudo systemctl disable --now "$unit"
done


# Keep the user's services running after logout.
sudo loginctl enable-linger $USER


mkdir -p "$HOME/.config/systemd/user"

cp "$(dirname "$0")/podman.service" "$HOME/.config/systemd/user/podman.service"


declare -A sysctl_vars=(
	[vm.overcommit_memory]=1
	[net.core.wmem_max]=2500000
	[net.core.rmem_max]=2500000
)

echo -n "" | sudo tee /etc/sysctl.d/podman.conf
for key in "${!sysctl_vars[@]}"; do
	value="${sysctl_vars[$key]}"
	echo "$key = $value" | sudo tee -a /etc/sysctl.d/podman.conf
done
# Bug fix: `sysctl -p` with no argument reloads /etc/sysctl.conf, not the
# file written above, and it ran once per loop iteration.  Load the new
# file explicitly, once, after it is fully written.
sudo sysctl -p /etc/sysctl.d/podman.conf


systemctl --user daemon-reload
systemctl --user restart podman
|
Loading…
Reference in a new issue