From 0124ae08e2de8ff6809f18843fd0bd3ccf29c663 Mon Sep 17 00:00:00 2001 From: Kiara Grouwstra Date: Mon, 14 Apr 2025 11:19:49 +0200 Subject: [PATCH] init --- .gitignore | 34 ++ README.md | 124 +++++ checklist.md | 97 ++++ coturn/README.md | 181 ++++++ coturn/turnserver.conf | 119 ++++ draupnir/README.md | 130 +++++ element-call/README.md | 375 +++++++++++++ element-call/element.json | 6 + element-web/README.md | 70 +++ firewall/README.md | 25 + nginx/README.md | 365 ++++++++++++ nginx/conf/call.conf | 34 ++ nginx/conf/domain.conf | 61 ++ nginx/conf/elementweb.conf | 29 + nginx/conf/livekit.conf | 37 ++ nginx/conf/revproxy.conf | 85 +++ nginx/conf/synapse-admin.conf | 16 + nginx/workers/README.md | 397 +++++++++++++ nginx/workers/conn_optimizations.conf | 13 + nginx/workers/locations.conf | 111 ++++ nginx/workers/maps.conf | 55 ++ nginx/workers/private.conf | 13 + nginx/workers/proxy.conf | 8 + nginx/workers/proxy_forward.conf | 20 + nginx/workers/upstreams.conf | 116 ++++ postgresql/README.md | 84 +++ synapse-admin/README.md | 33 ++ synapse/README.md | 646 ++++++++++++++++++++++ synapse/conf.d/authentication.yaml | 22 + synapse/conf.d/call.yaml | 19 + synapse/conf.d/database.yaml | 9 + synapse/conf.d/email.yaml | 9 + synapse/conf.d/homeserver_blocking.yaml | 11 + synapse/conf.d/keys.yaml | 5 + synapse/conf.d/mediastore.yaml | 29 + synapse/conf.d/report_stats.yaml | 5 + synapse/conf.d/server_name.yaml | 43 ++ synapse/conf.d/server_notices.yaml | 26 + synapse/conf.d/turn.yaml | 9 + synapse/homeserver.yaml | 34 ++ synapse/templates/0.1.html | 43 ++ synapse/templates/success.html | 11 + synapse/well-known-client.json | 12 + synapse/well-known-server.json | 1 + synapse/well-known-support.json | 17 + synapse/workers/README.md | 593 ++++++++++++++++++++ synapse/workers/federation_receiver1.yaml | 15 + synapse/workers/federation_sender1.yaml | 10 + synapse/workers/initial_sync1.yaml | 19 + synapse/workers/login-log.yaml | 41 ++ synapse/workers/login.yaml | 19 + 
synapse/workers/media-log.yaml | 41 ++ synapse/workers/media.yaml | 15 + 53 files changed, 4342 insertions(+) create mode 100644 .gitignore create mode 100644 README.md create mode 100644 checklist.md create mode 100644 coturn/README.md create mode 100644 coturn/turnserver.conf create mode 100644 draupnir/README.md create mode 100644 element-call/README.md create mode 100644 element-call/element.json create mode 100644 element-web/README.md create mode 100644 firewall/README.md create mode 100644 nginx/README.md create mode 100644 nginx/conf/call.conf create mode 100644 nginx/conf/domain.conf create mode 100644 nginx/conf/elementweb.conf create mode 100644 nginx/conf/livekit.conf create mode 100644 nginx/conf/revproxy.conf create mode 100644 nginx/conf/synapse-admin.conf create mode 100644 nginx/workers/README.md create mode 100644 nginx/workers/conn_optimizations.conf create mode 100644 nginx/workers/locations.conf create mode 100644 nginx/workers/maps.conf create mode 100644 nginx/workers/private.conf create mode 100644 nginx/workers/proxy.conf create mode 100644 nginx/workers/proxy_forward.conf create mode 100644 nginx/workers/upstreams.conf create mode 100644 postgresql/README.md create mode 100644 synapse-admin/README.md create mode 100644 synapse/README.md create mode 100644 synapse/conf.d/authentication.yaml create mode 100644 synapse/conf.d/call.yaml create mode 100644 synapse/conf.d/database.yaml create mode 100644 synapse/conf.d/email.yaml create mode 100644 synapse/conf.d/homeserver_blocking.yaml create mode 100644 synapse/conf.d/keys.yaml create mode 100644 synapse/conf.d/mediastore.yaml create mode 100644 synapse/conf.d/report_stats.yaml create mode 100644 synapse/conf.d/server_name.yaml create mode 100644 synapse/conf.d/server_notices.yaml create mode 100644 synapse/conf.d/turn.yaml create mode 100644 synapse/homeserver.yaml create mode 100644 synapse/templates/0.1.html create mode 100644 synapse/templates/success.html create mode 100644 
synapse/well-known-client.json create mode 100644 synapse/well-known-server.json create mode 100644 synapse/well-known-support.json create mode 100644 synapse/workers/README.md create mode 100644 synapse/workers/federation_receiver1.yaml create mode 100644 synapse/workers/federation_sender1.yaml create mode 100644 synapse/workers/initial_sync1.yaml create mode 100644 synapse/workers/login-log.yaml create mode 100644 synapse/workers/login.yaml create mode 100644 synapse/workers/media-log.yaml create mode 100644 synapse/workers/media.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c234679 --- /dev/null +++ b/.gitignore @@ -0,0 +1,34 @@ +# Eerst: GEEN PDF/PS IN GIT! +*.pdf +*.ps + +# ---> LyX +# Ignore LyX backup and autosave files +# http://www.lyx.org/ +*.lyx~ +*.lyx# + +# ---> Vim +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +# En geen vaults +/ansible/group_vars/matrix/vault.yaml + diff --git a/README.md b/README.md new file mode 100644 index 0000000..e0ba681 --- /dev/null +++ b/README.md @@ -0,0 +1,124 @@ +--- +gitea: none +include_toc: true +--- + +# A complete Matrix installation + +This documentation describes how to build a complete Matrix environment with +all bells and whistles. Not just the Synapse server, but (almost) every bit +you want. + +The main focus will be on the server itself, Synapse, but there's a lot more +than just that. + + +This documentation isn't ready yet, and if you find errors or room for improvement, +please let me know. You can do that via Matrix, obviously (`@hans:woefdram.nl`), via +e-mail (`docs@fediversity.eu`), or make a Pull Request. + + +## Overview + +A complete Matrix environment consists of many parts. 
Other than the Matrix +server itself (Synapse) there are all kinds of other things that we need: + +* [Synapse](https://element-hq.github.io/synapse/latest/) +* Webclient ([Element Web](https://github.com/element-hq/element-web)) +* [Element Call](https://github.com/element-hq/element-call) for audio/video +conferencing +* Management with [Synapse-Admin](https://github.com/Awesome-Technologies/synapse-admin) +* Moderation with [Draupnir](https://github.com/the-draupnir-project/Draupnir) +* [Consent +tracking](https://element-hq.github.io/synapse/latest/consent_tracking.html) +* Authentication via +[OpenID](https://element-hq.github.io/synapse/latest/openid.html) (later) +* Several [bridges](https://matrix.org/ecosystem/bridges/) (later) + + +# Overview + +This documentation aims to describe the installation of a complete Matrix +platform, with all bells and whistles. Several components are involved and +finishing the installation of one can be necessary for the installation of the +next. + +Before you start, make sure you take a look at the [checklist](checklist.md). + +These are the components we're going to use: + + +## Synapse + +This is the core component: the Matrix server itself, you should probably +install this first. + +Because not every use case is the same, we'll describe two different +architectures: + +* [Monolithic](synapse) + +This is the default way of installing Synapse, this is suitable for scenarios +with not too many users, and, importantly, users do not join many very crowded +rooms. + +* [Worker-based](synapse/workers) + +For servers that get a bigger load, for example those that host users that use +many big rooms, we'll describe how to process that higher load by distributing +it over workers. + + +## PostgreSQL + +This is the database Synapse uses. This should be the first thing you install +after Synapse, and once you're done, reconfigure the default Synapse install +to use PostgreSQL. 
+ +If you have already added stuff to the SQLite database that Synapse installs +by default that you don't want to lose: [here's how to migrate from SQLite to +PostgreSQL](https://element-hq.github.io/synapse/latest/postgres.html#porting-from-sqlite). + + +## nginx + +We need a webserver for several things, see how to [configure nginx](nginx) +here. + +If you install this, make sure to check which certificates you need, fix the +DNS entries and probably keep TTL for those entries very low until after +the installation, when you know everything's working. + + +## Element Call + +Element Call is the new way to have audio and video conferences, both +one-on-one and with groups. This does not use Jitsi and keeps E2EE intact. See +how to [setup and configure it](element-call). + + +# Element Web + +This is the fully-fledged web client, which is very [easy to set +up](element-web). + + +# TURN + +We may need a TURN server, and we'll use +[coturn](coturn) for that. + +It's apparently also possible to use the built-in TURN server in Livekit, +which we'll use if we use [Element Call](element-call). It's either/or, so make +sure you pick the right approach. + +You could possibly use both coturn and LiveKit, if you insist on being able to +use both legacy and Element Call functionality. This is not documented here +yet. + + +# Draupnir + +With Draupnir you can do moderation. It requires a few changes to both Synapse +and nginx, here's how to [install and configure Draupnir](draupnir). + diff --git a/checklist.md b/checklist.md new file mode 100644 index 0000000..da10d48 --- /dev/null +++ b/checklist.md @@ -0,0 +1,97 @@ +# Checklist + +Before you dive in and start installing, you should do a little planning +ahead. Ask yourself what you expect from your server. + +Is it a small server, just for yourself and some friends and family, or for +your hundreds of colleagues at work? Is it for private use, or do you need +decent moderation tools? 
Do you need audio and videoconferencing or not? + + +# Requirements + +It's difficult to specify hardware requirements upfront, because they don't +really depend on the number of users you have, but on their behaviour. A +server with users who don't engage in busy rooms like +[#matrix:matrix.org](https://matrix.to/#/#matrix:matrix.org) doesn't need more +than 2 CPU cores, 8GB of RAM and 50GB of diskspace. + +A server with users who do join very busy rooms, can easily eat 4 cores and +16GB of RAM. Or more. Or even much more. If you have a public server, where +unknown people can register new accounts, you'll probably need a bit more +oompf (and [moderation](draupnir)). + +During its life, the server may need more resources, if users change +their behaviour. Or less. There's no one-size-fits-all approach. + +If you have no idea, you should probably start with 2 cores, 8GB RAM and some +50GB diskspace, and follow the [monolithic setup](synapse). + +If you expect a higher load (you might get there sooner than you think), you +should probably follow the [worker-based setup](synapse/workers), because +changing the architecture from monolithic to worker-based once the server is +already in use, is a tricky task. + +Here's a ballpark figure. Remember, your mileage will probably vary. And +remember, just adding RAM and CPU doesn't automatically scale: you'll need to +tune [PostgreSQL](postgresql/README.md#tuning) and your workers as well so +that your hardware is optimally used. 
+ +| Scenario | Architecture | CPU | RAM | Diskspace (GB) | +| :------------------------------------ | :-----------------------------: | :----: | :----: | :------------: | +| Personal, not many very busy rooms | [monolithic](synapse) | 2 | 8GB | 50 | +| Private, users join very busy rooms | [worker-based](synapse/workers) | 4 | 16GB | 100 | +| Public, many users in very busy rooms | [worker-based](synapse/workers) | 8 | 32GB | 250 | + + +# DNS and certificates + +You'll need to configure several things in DNS, and you're going to need a +couple of TLS-certificates. Best to configure those DNS entries first, so that +you can quickly generate the certificates once you're there. + +It's usually a good idea to keep the TTL of all these records very low while +installing and configuring, so that you can quickly change records without +having to wait for the TTL to expire. Setting a TTL of 300 (5 minutes) should +be fine. Once everything is in place and working, you should probably increase +it to a more production ready value, like 3600 (1 hour) or more. + +What do you need? Well, first of all you need a domain. In this documentation +we'll use `example.com`, you'll need to substitute that with your own domain. + +Under the top of that domain, you'll need to host 2 files under +`/.well-known`, so you'll need a webserver there, using a valid +TLS-certificate. This doesn't have to be the same machine as the one you're +installing Synapse on. In fact, it usually isn't. + +Assuming you're hosting Matrix on the machine `matrix.example.com`, you need +at least an `A` record in DNS, and -if you have IPv6 support, which you +should- an `AAAA` record too. **YOU CAN NOT USE A CNAME FOR THIS RECORD!** +You'll need a valid TLS-certificate for `matrix.example.com` too. + +You'll probably want the webclient too, so that users aren't forced to use an +app on their phone or install the desktop client on their PC. 
You should never +run the web client on the same name as the server, that opens you up for all +kinds of Cross-Site-Scripting attack. We'll assume you use +`element.example.com` for the web client. You need a DNS entry for that. This +can be a CNAME, but make sure you have a TLS-certificate with the correct name +on it. + +If you install a [TURN-server](coturn), either for legacy calls or for [Element +Call](element-call) (or both), you need a DNS entry for that too, and -again- a +TLS-certificate. We'll use `turn.example.com` for this. + +If you install Element Call (and why shouldn't you?), you need a DNS entry plus +certificate for that, let's assume you use `call.example.com` for that. This +can be a CNAME again. Element Call uses [LiveKit](element-call#livekit) for the +actual processing of audio and video, and that needs its own DNS entry and certificate +too. We'll use `livekit.example.com`. + +| FQDN | Use | Comment | +| :-------------------- | :--------------------- | :--------------------------------------- | +| `example.com` | Hosting `.well-known` | This is the `server_name` | +| `matrix.example.com` | Synapse server | This is the `base_url`, can't be `CNAME` | +| `element.example.com` | Webclient | | +| `turn.example.com` | TURN / Element Call | Highly recommended | +| `call.example.com` | Element Call | Optional | +| `livekit.example.com` | LiveKit SFU | Optional, needed for Element Call | diff --git a/coturn/README.md b/coturn/README.md new file mode 100644 index 0000000..4468f0d --- /dev/null +++ b/coturn/README.md @@ -0,0 +1,181 @@ +--- +gitea: none +include_toc: true +--- + +# TURN server + +You need a TURN server to connect participants that are behind a NAT firewall. +Because IPv6 doesn't really need TURN, and Chrome can get confused if it has +to use TURN over IPv6, we'll stick to a strict IPv4-only configuration. + +Also, because VoIP traffic is only UDP, we won't do TCP. 
+ +TURN-functionality can be offered by coturn and LiveKit alike: coturn is used +for legacy calls (only one-on-one, supported in Element Android), whereas +Element Call (supported by ElementX, Desktop and Web) uses LiveKit. + +In our documentation we'll enable both, which is probably not the optimal +solution, but at least it results in a system that supports old and new +clients. + +Here we'll describe coturn, the dedicated ICE/STUN/TURN server that needs to +be configured in Synapse, [LiveKit](../element-call#livekit) has its own page. + +# Installation + +Installation is short: + +``` +apt install coturn +``` + +For the sake of maintainability we'll move the only configuration file into its +own directory: + +``` +mkdir /etc/coturn +mv /etc/turnserver.conf /etc/coturn +``` + +We need to tell systemd to start it with the configuration file in the new +place. Edit the service file with: + +``` +systemctl edit coturn +``` + +Contrary to what the comment suggests, only the parts you add will override +the content that's already there. We have to "clean" the `ExecStart` first, +before we assign a new line to it, so this is the bit we add: + +``` +[Service] +ExecStart= +ExecStart=/usr/bin/turnserver -c /etc/coturn/turnserver.conf --pidfile=/etc/coturn/run/turnserver.pid +``` + +Create the directory `/etc/coturn/run` and chgrp it to `turnserver`, so that +coturn can write its pid there: `/run/turnserver.pid` can't be written because +coturn doesn't run as root. + +This prepares us for the next step: configuring the whole thing. + + +# DNS and certificate {#dnscert} + +As stated before, we only use IPv4, so a CNAME to our machine that also does +IPv6 is a bad idea. Fix a new entry in DNS for TURN only, we'll use +`turn.example.com` here. + +Make sure this entry only has an A record, no AAAA. + +Get a certificate for this name: + +``` +certbot certonly --nginx -d turn.example.com +``` + +This assumes you've already set up and started nginx (see [nginx](../nginx)). 
+ +{#fixssl} +The certificate files reside under `/etc/letsencrypt/live`, but coturn and +LiveKit don't run as root, and can't read them. Therefore we create the directory +`/etc/coturn/ssl` where we copy the files to. This script should be run after +each certificate renewal: + +``` +#!/bin/bash + +# This script is hooked after a renewal of the certificate, so that the +# certificate files are copied and chowned, and made readable by coturn: + +cd /etc/coturn/ssl +cp /etc/letsencrypt/live/turn.example.com/{fullchain,privkey}.pem . +chown turnserver:turnserver *.pem + +# Make sure you only start/restart the servers that you need! +systemctl try-reload-or-restart coturn livekit-server + +``` + +Run this automatically after every renewal by adding this line to +`/etc/letsencrypt/renewal/turn.example.com.conf`: + +``` +renew_hook = /etc/coturn/fixssl +``` + +Yes, it's a bit primitive and could (should?) be polished. But for now: it +works. This will copy and chown the certificate files and restart coturn +and/or LiveKit, depending on if they're running or not. + + +# Configuration {#configuration} + +Synapse's documentation gives a reasonable [default +config](https://element-hq.github.io/synapse/latest/setup/turn/coturn.html). + +We'll need a shared secret that Synapse can use to control coturn, so let's +create that first: + +``` +pwgen -s 64 1 +``` + +Now that we have this, we can configure our configuration file under +`/etc/coturn/turnserver.conf`. + +``` +# We don't use the default ports, because LiveKit uses those +listening-port=3480 +tls-listening-port=5351 + +# We don't need more than 10000 connections: +min-port=40000 +max-port=49999 + +use-auth-secret +static-auth-secret= + +realm=turn.example.com +user-quota=12 +total-quota=1200 + +# Of course: substitute correct IPv4 address: +listening-ip=111.222.111.222 + +# VoIP traffic is only UDP +no-tcp-relay + +# coturn doesn't run as root, so the certificate has +# to be copied/chowned here. 
+cert=/etc/coturn/ssl/fullchain.pem +pkey=/etc/coturn/ssl/privkey.pem + +denied-peer-ip=0.0.0.0-255.255.255.255 +denied-peer-ip=127.0.0.0-0.255.255.255 +denied-peer-ip=10.0.0.0-10.255.255.255 +denied-peer-ip=172.16.0.0-172.31.255.255 +denied-peer-ip=192.168.0.0-192.168.255.255 +denied-peer-ip=100.64.0.0-100.127.255.255 +denied-peer-ip=192.0.0.0-192.0.0.255 +denied-peer-ip=169.254.0.0-169.254.255.255 +denied-peer-ip=192.88.99.0-192.88.99.255 +denied-peer-ip=198.18.0.0-198.19.255.255 +denied-peer-ip=192.0.2.0-192.0.2.255 +denied-peer-ip=198.51.100.0-198.51.100.255 +denied-peer-ip=203.0.113.0-203.0.113.255 + +# We do only IPv4 +allocation-default-address-family="ipv4" + +# No weak TLS +no-tlsv1 +no-tlsv1_1 +``` + +All other options in the configuration file are either commented out, or +defaults. + +Make sure you've opened the correct ports in the [firewall](../firewall). diff --git a/coturn/turnserver.conf b/coturn/turnserver.conf new file mode 100644 index 0000000..cfff14d --- /dev/null +++ b/coturn/turnserver.conf @@ -0,0 +1,119 @@ +# Coturn TURN SERVER configuration file + +# Only IPv4, IPv6 can confuse some software +listening-ip=111.222.111.222 + +# Listening port for TURN (UDP and TCP): +listening-port=3480 + +# Listening port for TURN TLS (UDP and TCP): +tls-listening-port=5351 + +# Lower and upper bounds of the UDP relay endpoints: +# (default values are 49152 and 65535) +# +min-port=40000 +max-port=49999 + +use-auth-secret +static-auth-secret= + +realm=turn.example.com + + +# Per-user allocation quota. +# default value is 0 (no quota, unlimited number of sessions per user). +# This option can also be set through the database, for a particular realm. +user-quota=12 + +# Total allocation quota. +# default value is 0 (no quota). +# This option can also be set through the database, for a particular realm. +total-quota=1200 + +# Uncomment if no TCP relay endpoints are allowed. +# By default TCP relay endpoints are enabled (like in RFC 6062). 
+# +no-tcp-relay + +# Certificate file. +# Use an absolute path or path relative to the +# configuration file. +# Use PEM file format. +cert=/etc/coturn/ssl/fullchain.pem + +# Private key file. +# Use an absolute path or path relative to the +# configuration file. +# Use PEM file format. +pkey=/etc/coturn/ssl/privkey.pem + +# Option to redirect all log output into system log (syslog). +# +syslog + +# Option to allow or ban specific ip addresses or ranges of ip addresses. +# If an ip address is specified as both allowed and denied, then the ip address is +# considered to be allowed. This is useful when you wish to ban a range of ip +# addresses, except for a few specific ips within that range. +# +# This can be used when you do not want users of the turn server to be able to access +# machines reachable by the turn server, but would otherwise be unreachable from the +# internet (e.g. when the turn server is sitting behind a NAT) +# +denied-peer-ip=0.0.0.0-255.255.255.255 +denied-peer-ip=127.0.0.0-0.255.255.255 +denied-peer-ip=10.0.0.0-10.255.255.255 +denied-peer-ip=172.16.0.0-172.31.255.255 +denied-peer-ip=192.168.0.0-192.168.255.255 +denied-peer-ip=100.64.0.0-100.127.255.255 +denied-peer-ip=192.0.0.0-192.0.0.255 +denied-peer-ip=169.254.0.0-169.254.255.255 +denied-peer-ip=192.88.99.0-192.88.99.255 +denied-peer-ip=198.18.0.0-198.19.255.255 +denied-peer-ip=192.0.2.0-192.0.2.255 +denied-peer-ip=198.51.100.0-198.51.100.255 +denied-peer-ip=203.0.113.0-203.0.113.255 + + +# TURN server allocates address family according TURN client requested address family. +# If address family not requested explicitly by the client, then it falls back to this default. +# The standard RFC explicitly define that this default must be IPv4, +# so use other option values with care! +# Possible values: "ipv4" or "ipv6" or "keep" +# "keep" sets the allocation default address family according to +# the TURN client allocation request connection address family. 
+allocation-default-address-family="ipv4" + +# Turn OFF the CLI support. +# By default it is always ON. +# See also options cli-ip and cli-port. +# +no-cli + +# Do not allow an TLS/DTLS version of protocol +# +no-tlsv1 +no-tlsv1_1 + +# Disable RFC5780 (NAT behavior discovery). +# +# Strongly encouraged to use this option to decrease gain factor in STUN +# binding responses. +# +no-rfc5780 + +# Disable handling old STUN Binding requests and disable MAPPED-ADDRESS +# attribute in binding response (use only the XOR-MAPPED-ADDRESS). +# +# Strongly encouraged to use this option to decrease gain factor in STUN +# binding responses. +# +no-stun-backward-compatibility + +# Only send RESPONSE-ORIGIN attribute in binding response if RFC5780 is enabled. +# +# Strongly encouraged to use this option to decrease gain factor in STUN +# binding responses. +# +response-origin-only-with-rfc5780 diff --git a/draupnir/README.md b/draupnir/README.md new file mode 100644 index 0000000..bc4710c --- /dev/null +++ b/draupnir/README.md @@ -0,0 +1,130 @@ +--- +gitea: none +include_toc: true +--- + +# Draupnir + +Draupnir is the way to do moderation. It can exchange banlists with other +servers, and drop reports that people send into its moderation room so that +moderators can act upon them. + +Start by creating a room where moderators can give Draupnir commands. This +room should not be encrypted. Then create a user for Draupnir, this user +should ideally be an admin user. + +Once you've created the user, log in as this user, maybe set an avatar, join +the room you've created and then copy the access token. This token is used by +the Draupnir software to login. + +After that, close the window or client, but +do not logout. If you logout, the token will be invalidated. 
+ +Make sure you have the right npm, Node.js, yarn and what-have-you ([see +Draupnir's documentation](https://the-draupnir-project.github.io/draupnir-documentation/bot/setup_debian)) +and prepare the software: + +``` +mkdir -p /opt +cd /opt +git clone https://github.com/the-draupnir-project/Draupnir.git +cd Draupnir +git fetch --tags +mkdir datastorage +yarn global add corepack +useradd -m draupnir +chown -R draupnir:draupnir /opt/Draupnir +``` + +Now, "compile" the stuff as user draupnir: + +``` +sudo -u draupnir bash -c "yarn install" +sudo -u draupnir bash -c "yarn build" +``` + +When this is completed successfully, it's time to configure Draupnir. + + +# Configuration + +Under `config` you'll find the default configuration file, `default.yaml`. +Copy it to `production.yaml` and change what you must. + +| Option | Value | Meaning | +| :---- | :---- | :---- | +| `homeserverUrl` | `http://localhost:8008` | Where to communicate with Synapse when using network port| +| `homeserverUrl` | `http://unix:/run/matrix-synapse/incoming_main.sock` | Where to communicate with Synapse when using UNIX sockets (see [Workers](../synapse/workers.md)) | +| `rawHomeserverUrl` | `https://matrix.example.com` | Same as `server_name` | +| `accessToken` | access token | Copy from login session or create in [Synapse Admin](../synapse-admin) | +| `password` | password | Password for the account | +| `dataPath` | `/opt/Draupnir/datastorage` | Storage | +| `managementRoom` | room ID | Room where moderators command Draupnir | + +This should give a working bot. + +There are a few other bits that you probably want to change. 
Draupnir can +direct reports to the management room, this is what you should change to +activate that: + +``` +web: + enabled: true + port: 8082 + address: ::1 + abuseReporting: + enabled: true + +pollReports: true +displayReports: true +``` + +For this to work (for reports to reach Draupnir) you'll need to configure +nginx to forward requests for reports to Draupnir: + +``` +location ~ ^/_matrix/client/(r0|v3)/rooms/([^/]*)/report/(.*)$ { + # The r0 endpoint is deprecated but still used by many clients. + # As of this writing, the v3 endpoint is the up-to-date version. + + # Alias the regexps, to ensure that they're not rewritten. + set $room_id $2; + set $event_id $3; + proxy_pass http://[::1]:8082/api/1/report/$room_id/$event_id; +} + +# Reports that need to reach Synapse (not sure if this is used) +location /_synapse/admin/v1/event_reports { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; +} +location ~ ^/_synapse/admin/v1/rooms/([^/]*)/context/(.*)$ { + set $room_id $2; + set $event_id $3; + proxy_pass http://localhost:8008/_synapse/admin/v1/rooms/$room_id/context/$event_id; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; +} +``` + +# Rate limiting + +Normal users are rate limited, to prevent them from flooding the server. Draupnir +is meant to stop those events, but if it itself is rate limited, it won't work +all that well. + +How rate limiting is configured server-wide is documented in [Synapse's +documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=ratelimiting#ratelimiting). +Overriding is, unfortunately, not something you can easily configure in the +configuration files. 
You'll have to do that in the database itself: + +``` +INSERT INTO ratelimit_override VALUES ('@draupnir:example.com', 0, 0); +``` diff --git a/element-call/README.md b/element-call/README.md new file mode 100644 index 0000000..2d42d72 --- /dev/null +++ b/element-call/README.md @@ -0,0 +1,375 @@ +--- +gitea: none +include_toc: true +--- + +# Overview + +Element Call consists of a few parts, you don't have to host all of them +yourself. In this document, we're going to host everything ourselves, so +here's what you need. + +* **lk-jwt**. This authenticates Synapse users to LiveKit. +* **LiveKit**. This is the "SFU", which actually handles the audio and video, and does TURN. +* **Element Call widget**. This is basically the web application, the user interface. + +As mentioned in the [checklist](../checklist.md) you need to define these +three entries in DNS and get certificates for them: + +* `turn.example.com` +* `livekit.example.com` +* `call.example.com` + +You may already have DNS and TLS for `turn.example.com`, as it is also used +for [coturn](../coturn). + +For more inspiration, check https://sspaeth.de/2024/11/sfu/ + + +# LiveKit {#livekit} + +The actual SFU, Selective Forwarding Unit, is LiveKit; this is the part that +handles the audio and video feeds and also does TURN (this TURN-functionality +does not support the legacy calls, you'll need [coturn](coturn) for that). + +Downloading and installing is easy: download the [binary from +Github](https://github.com/livekit/livekit/releases/download/v1.8.0/livekit_1.8.0_linux_amd64.tar.gz) + to /usr/local/bin, chown it to root:root and you're done. + +The quickest way to do precisely that, is to run the script: + +``` +curl -sSL https://get.livekit.io | bash +``` + +You can do this as a normal user, it will use sudo to do its job. 
+ +While you're at it, you might consider installing the cli tool as well, you +can use it -for example- to generate tokens so you can [test LiveKit's +connectivity](https://livekit.io/connection-test): + +``` +curl -sSL https://get.livekit.io/cli | bash +``` + +Configuring LiveKit is [documented +here](https://docs.livekit.io/home/self-hosting/deployment/). We're going to +run LiveKit under authorization of user `turnserver`, the same users we use +for [coturn](coturn). This user is created when installing coturn, so if you +haven't installed that, you should create the user yourself: + +``` +adduser --system turnserver +``` + +## Configure {#keysecret} + +Start by creating a key and secret: + +``` +livekit-server generate-keys +``` + +This key and secret have to be fed to lk-jwt-service too, [see here](#jwtconfig). +Create the directory for LiveKit's configuration: + +``` +mkdir /etc/livekit +chown root:turnserver /etc/livekit +chmod 750 /etc/livekit +``` + +Create a configuration file for livekit, `/etc/livekit/livekit.yaml`: + +``` +port: 7880 +bind_addresses: + - ::1 +rtc: + tcp_port: 7881 + port_range_start: 50000 + port_range_end: 60000 + use_external_ip: true + enable_loopback_candidate: false +turn: + enabled: true + domain: livekit.example.com + cert_file: /etc/coturn/ssl/fullchain.pem + key_file: /etc/coturn/ssl/privkey.pem + tls_port: 5349 + udp_port: 3478 + external_tls: true +keys: + # KEY: SECRET were generated by "livekit-server generate-keys" + : +``` + +Being a bit paranoid: make sure LiveKit can only read this file, not write it: + +``` +chown root:turnserver /etc/livekit/livekit.yaml +chmod 640 /etc/livekit/livekit.yaml +``` + +Port `7880` is forwarded by nginx: authentication is also done there, and that +bit has to be forwarded to `lk-jwt-service` on port `8080`. Therefore, we +listen only on localhost. + +The TURN ports are the normal, default ones. If you also use coturn, make sure +it doesn't use the same ports as LiveKit. 
Also, make sure you open the correct +ports in the [firewall](../firewall). + + +## TLS certificate + +The TLS-certificate files are not in the usual place under +`/etc/letsencrypt/live`, see [DNS and +certificate](../coturn/README.md#dnscert) under coturn why that is. + +As stated before, we use the same user as for coturn. Because this user does +not have the permission to read private keys under `/etc/letsencrypt`, we copy +those files to a place where it can read them. For coturn we copy them to +`/etc/coturn/ssl`, and if you use coturn and have this directory, LiveKit can +read them there too. + +If you don't have coturn installed, you should create a directory under +`/etc/livekit` and copy the files to there. Modify the `livekit.yaml` file and +the [script to copy the files](../coturn/README.md#fixssl) to use that +directory. Don't forget to update the `renew_hook` in Letsencrypt if you do. + +The LiveKit API listens on localhost, IPv6, port 7880. Traffic to this port is +forwarded from port 443 by nginx, which handles TLS, so it shouldn't be reachable +from the outside world. + +See [LiveKit's config documentation](https://github.com/livekit/livekit/blob/master/config-sample.yaml) +for more options. + + +## Systemd + +Now define a systemd servicefile, like this: + +``` +[Unit] +Description=LiveKit Server +After=network.target +Documentation=https://docs.livekit.io + +[Service] +User=turnserver +Group=turnserver +LimitNOFILE=500000 +Restart=on-failure +WorkingDirectory=/etc/livekit +ExecStart=/usr/local/bin/livekit-server --config /etc/livekit/livekit.yaml + +[Install] +WantedBy=multi-user.target +``` + +Enable and start it. 
+ +Clients don't know about LiveKit yet, you'll have to give them the information +via the `.well-known/matrix/client`: add this bit to it to point them at the +SFU: + +``` +"org.matrix.msc4143.rtc_foci": [ + { + "type": "livekit", + "livekit_service_url": "https://livekit.example.com" + } + ] +``` + +Make sure it is served as `application/json`, just like the other .well-known +files. + + +# lk-jwt-service {#lkjwt} + +lk-jwt-service is a small Go program that handles authorization tokens for use with LiveKit. +You'll need a Go compiler, but the one Debian provides is too old (at the time +of writing this, at least), so we'll install the latest one manually. Check +[the Go website](https://go.dev/dl/) to see which version is the latest, at +the time of writing it's 1.23.3, so we'll install that: + +``` +wget https://go.dev/dl/go1.23.3.linux-amd64.tar.gz +tar xvfz go1.23.3.linux-amd64.tar.gz +cd go/bin +export PATH=`pwd`:$PATH +cd +``` + +This means you now have the latest Go compiler in your path, but it's not +installed system-wide. If you want that, copy the whole `go` directory to +`/usr/local` and add `/usr/local/go/bin` to everybody's $PATH. 
+ +Get the latest lk-jwt-service source code (preferably *NOT* as root): + +``` +git clone https://github.com/element-hq/lk-jwt-service.git +``` + +Now, compile: + +``` +cd lk-jwt-service +go build -o lk-jwt-service +``` + +Copy and chown the binary to `/usr/local/sbin` (yes: as root): + +``` +cp ~user/lk-jwt-service/lk-jwt-service /usr/local/sbin +chown root:root /usr/local/sbin/lk-jwt-service +``` + + +## Systemd + +Create a service file for systemd, something like this: + +``` +# This thing does authorization for Element Call + +[Unit] +Description=LiveKit JWT Service +After=network.target + +[Service] +Restart=always +User=www-data +Group=www-data +WorkingDirectory=/etc/lk-jwt-service +EnvironmentFile=/etc/lk-jwt-service/config +ExecStart=/usr/local/sbin/lk-jwt-service + +[Install] +WantedBy=multi-user.target +``` + +## Configuration {#jwtconfig} + +We read the options from `/etc/lk-jwt-service/config`, +which we make read-only for group `www-data` and non-accessible by anyone +else. + +``` +mkdir /etc/lk-jwt-service +vi /etc/lk-jwt-service/config +chown -R root:www-data /etc/lk-jwt-service +chmod 750 /etc/lk-jwt-service +``` + +This is what you should put into that config file, +`/etc/lk-jwt-service/config`. The `LIVEKIT_SECRET` and `LIVEKIT_KEY` are the +ones you created while [configuring LiveKit](#keysecret). + +``` +LIVEKIT_URL=wss://livekit.example.com +LIVEKIT_SECRET=xxx +LIVEKIT_KEY=xxx +LK_JWT_PORT=8080 +``` + +Change the permission accordingly: + +``` +chown root:www-data /etc/lk-jwt-service/config +chmod 640 /etc/lk-jwt-service/config +``` + +Now enable and start this thing: + +``` +systemctl enable --now lk-jwt-service +``` + + +# Element Call widget {#widget} + +This is a Node.js thingy, so start by installing yarn. Unfortunately both npm +and `yarnpkg` in Debian are antique, so we need to update them after installation. +Install Node.js and upgrade everything. 
Do not do this as root, we'll only +need to "compile" Element Call once. + +See [the Node.js +website](https://nodejs.org/en/download/package-manager/current) for +instructions. + + +``` +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.0/install.sh | bash +``` + +Exit and login again to set some environment variables (yes, the installation +changes .bashrc). Then install and upgrade: + +``` +nvm install 23 +sudo apt install yarnpkg +/usr/share/nodejs/yarn/bin/yarn set version stable +/usr/share/nodejs/yarn/bin/yarn install +``` + +Yes, this whole Node.js, yarn and npm thing is a mess. Better documentation +could be written, but for now this will have to do. + +Now clone the Element Call repository and "compile" stuff (again: not as +root): + +``` +git clone https://github.com/element-hq/element-call.git +cd element-call +/usr/share/nodejs/yarn/bin/yarn +/usr/share/nodejs/yarn/bin/yarn build +``` + +If it successfully compiles (warnings are more or less ok, errors aren't), you will +find the whole shebang under "dist". Copy that to `/var/www/element-call` and point +nginx to it ([see nginx](../nginx#callwidget)). + + +## Configuring + +It needs a tiny bit of configuring. The default configuration under `config/config.sample.json` +is a good place to start, copy it to `/etc/element-call` and change where +necessary: + +``` +{ + "default_server_config": { + "m.homeserver": { + "base_url": "https://matrix.example.com", + "server_name": "example.com" + } + }, + + "livekit": { + "livekit_service_url": "https://livekit.example.com" + }, + + "features": { + "feature_use_device_session_member_events": true + }, + + "eula": "https://www.example.com/online-EULA.pdf" +} +``` + +Now tell the clients about this widget. Create +`.well-known/element/element.json`, which is opened by Element Web, Element Desktop +and ElementX to find the Element Call widget. 
It should look this: + +``` +{ + "call": { + "widget_url": "https://call.example.com" + } +} +``` + diff --git a/element-call/element.json b/element-call/element.json new file mode 100644 index 0000000..7885725 --- /dev/null +++ b/element-call/element.json @@ -0,0 +1,6 @@ +{ + "call": + { + "widget_url": "https://call.example.com" + } +} diff --git a/element-web/README.md b/element-web/README.md new file mode 100644 index 0000000..f3dbfee --- /dev/null +++ b/element-web/README.md @@ -0,0 +1,70 @@ +--- +gitea: none +include_toc: true +--- + +# Element-web + +Element-web is the webinterface, Element in a browser. You'll find the source +and [documentation on installing and +configuring](https://github.com/element-hq/element-web/blob/develop/docs/install.md) +on Github. + +You should never run Element-web on the same FQDN as your Synapse-server, +because of XSS problems. So start by defining a new FQDN for where you will +publish Element-web, and get a certificate for that (don't forget to +[automatically reload nginx after the certificate renewal](../nginx/README.md#certrenew)). + +We'll use `element.example.com` here. + + +# Installing on Debian {#debian} + +Installing it on Debian is very easy indeed: + +``` +wget -O /usr/share/keyrings/element-io-archive-keyring.gpg https://packages.element.io/debian/element-io-archive-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/element-io-archive-keyring.gpg] https://packages.element.io/debian/ default main" | + tee /etc/apt/sources.list.d/element-io.list +apt update +apt install element-web +``` + + +# Configuration {#configuration} + +Configuring is done in `config.json`, which needs to go into `/etc/element-web` +in a Debian install. See the [documentation on +Github](https://github.com/element-hq/element-web/blob/develop/docs/config.md). + +The most important thing to change is the `default_server_config`. 
Make sure +it's something like this: + +``` +"default_server_config": { + "m.homeserver": { + "base_url": "https://matrix.example.com", + "server_name": "example.com" + } +}, +``` + +Of course, substitute the correct domain and server name. + + +# Browser notes {#browsernotes} + +Element-web runs in the browser, on JavaScript. Yours truly found out that +running [JShelter](https://jshelter.org/) throws a spanner in the works, so +you'll have to disable it for the URL you publish Element-web. + +Also, Element-web is rather dependent on the version of your browser, so make +sure you keep yours up-to-date. Debian users, who run "Firefox ESR" should +know support for that is on a best effort basis, you might want to consider +using the "real" Firefox. [Debian packages are +available](https://support.mozilla.org/en-US/kb/install-firefox-linux#w_install-firefox-deb-package-for-debian-based-distributions-recommended). + +Element Web uses "workers", that are not installed in private windows. One +thing that won't work in a private window, is downloading (i.e. displaying) +images. If you don't see avatars and get "failed to download" messages, check +if you're running Element Web in a private window. diff --git a/firewall/README.md b/firewall/README.md new file mode 100644 index 0000000..9e1ba33 --- /dev/null +++ b/firewall/README.md @@ -0,0 +1,25 @@ +# Firewall + +Several ports need to be opened in the firewall, this is a list of all ports +that are needed by the components we describe in this document. + +Those for nginx are necessary for Synapse to work, the ones for coturn and +LiveKit only need to be opened if you run those servers. 
+ + +| Port(s) / range | IP version | Protocol | Application | +| :-------------: | :--------: | :------: | :--------------------- | +| 80, 443 | IPv4/IPv6 | TCP | nginx, reverse proxy | +| 8443 | IPv4/IPv6 | TCP | nginx, federation | +| 3478 | IPv4 | UDP | LiveKit TURN | +| 5349 | IPv4 | TCP | LiveKit TURN TLS | +| 7881 | IPv4/IPv6 | TCP | LiveKit RTC | +| 50000-60000 | IPv4/IPv6 | TCP/UDP | LiveKit RTC | +| 3480 | IPv4 | TCP/UDP | coturn TURN | +| 5351 | IPv4 | TCP/UDP | coturn TURN TLS | +| 40000-49999 | IPv4 | TCP/UDP | coturn RTC | + + +The ports necessary for TURN depend very much on the specific configuration of +[coturn](../coturn#configuration) and/or [LiveKit](../element-call#livekit). + diff --git a/nginx/README.md b/nginx/README.md new file mode 100644 index 0000000..ceef163 --- /dev/null +++ b/nginx/README.md @@ -0,0 +1,365 @@ +--- +gitea: none +include_toc: true +--- + +# Reverse proxy with nginx + +Clients connecting from the Internet to our Matrix environment will usually +use SSL/TLS to encrypt whatever they want to send. This is one thing that +nginx does better than Synapse. + +Furthermore, granting or denying access to specific endpoints is much easier +in nginx. + +Synapse listens only on localhost, so nginx has to pass connections on from +the wild west that is the Internet to our server listening on the inside. + + +# Installing + +Installing nginx and the [Let's Encrypt](https://letsencrypt.org/) plugin is +easy: + +``` +apt install nginx python3-certbot-nginx +``` + +Get your certificate for the base domain (which is probably not the machine on which +we're going to run Synapse): + +``` +certbot certonly --nginx --agree-tos -m system@example.com --non-interactive -d example.com +``` + +Get one for the machine on which we are going to run Synapse too: + +``` +certbot certonly --nginx --agree-tos -m system@example.com --non-interactive -d matrix.example.com +``` + +Substitute the correct e-mailaddress and FQDN, or course. 
+ + +## Automatic renewal {#certrenew} + +Certificates have a limited lifetime, and need to be updated every once in a +while. This should be done automatically by Certbot, see if `systemctl +list-timers` lists `certbot.timer`. + +However, renewing the certificate means you'll have to restart the software +that's using it. We have 2 or 3 pieces of software that use certificates: +[coturn](../coturn) and/or [LiveKit](../element-call#livekit), and [nginx](../nginx). + +Coturn/LiveKit are special with regards to the certificate, see their +respective pages. For nginx it's pretty easy: tell Letsencrypt to restart it +after a renewal. + +You do this by adding this line to the `[renewalparams]` in +`/etc/letsencrypt/renewal/.conf`: + +``` +renew_hook = systemctl try-reload-or-restart nginx +``` + + +# Configuration of domain name {#configdomain} + +Let's start with the configuration on the webserver that runs on the domain +name itself, in this case `example.com`. + +Almost all traffic should be encrypted, so a redirect from http to https seems +like a good idea. + +However, `.well-known/matrix/client` has to be available via http and https, +so that should *NOT* be redirected to https. Some clients don't understand the +redirect and will therefore not find the server if you redirect everything. 
+ +Under the `server_name` (the "domain name", the part after the username) you +will need a configuration like this: + +``` +server { + listen 80; + listen [::]:80; + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name example.com; + + location /.well-known/matrix/client { + return 200 '{ + "m.homeserver": {"base_url": "https://matrix.example.com"} + }'; + default_type application/json; + } + + location /.well-known/matrix/server { + return 200 '{"m.server": "matrix.example.com"}'; + default_type application/json; + } + + location / { + if ($scheme = http) { + return 301 https://$host$request_uri; + } + } + + access_log /var/log/nginx/example_com-access.log; + error_log /var/log/nginx/example_com-error.log; + +} +``` + +This defines a server that listens on both http and https. It hands out two +.well-known entries over both http and https, and every other request over +http is forwarded to https. + +Be sure to substitute the correct values for `server_name`, `base_url` and the +certificate files (and [renew the certificate](#certrenew)). + +See this [full configuration example](domain.conf) with some extra stuff. + + +# Configuration of the reverse proxy + +For the actual proxy in front of Synapse, this is what you need: forward ports +443 and 8448 to Synapse, listening on localhost, and add a few headers so +Synapse knows who's on the other side of the line. 
+ +``` +server { + listen 443 ssl; + listen [::]:443 ssl; + + # For the federation port + listen 8448 ssl default_server; + listen [::]:8448 ssl default_server; + + ssl_certificate /etc/letsencrypt/live/matrix.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/matrix.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name matrix.example.com; + + location ~ ^(/_matrix|/_synapse/client) { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + +} +``` + +Again, substitute the correct values. Don't forget to open the relevant ports +in the firewall. Ports 80 and 443 may already be open, 8448 is probably not. + +This is a very, very basic configuration; just enough to give us a working +service. See this [complete example](revproxy.conf) which also includes +[Draupnir](../draupnir) and a protected admin endpoint. + +# Element Web + +You can host the webclient on a different machine, but we'll run it on the +same one in this documentation. You do need a different FQDN however, you +can't host it under the same name as Synapse, such as: +``` +https://matrix.example.com/element-web +``` +So you'll need to create an entry in DNS and get a TLS-certificate for it (as +mentioned in the [checklist](../checklist.md)). + +Other than that, configuration is quite simple. 
We'll listen on both http and +https, and redirect http to https: + +``` +server { + listen 80; + listen [::]:80; + listen 443 ssl http2; + listen [::]:443 ssl http2; + + ssl_certificate /etc/letsencrypt/live/element.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/element.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name element.example.com; + + location / { + if ($scheme = http) { + return 301 https://$host$request_uri; + } + add_header X-Frame-Options SAMEORIGIN; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Content-Security-Policy "frame-ancestors 'self'"; + } + + root /usr/share/element-web; + index index.html; + + access_log /var/log/nginx/elementweb-access.log; + error_log /var/log/nginx/elementweb-error.log; +} +``` + +This assumes Element Web is installed under `/usr/share/element-web`, as done +by the Debian package provided by Element.io. + +# Synapse-admin {#synapse-admin} + +If you also [install Synapse-Admin](../synapse-admin), you'll want to create +another vhost, something like this: + +``` +server { + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/admin.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/admin.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name admin.example.com; + + root /var/www/synapse-admin; + + access_log /var/log/nginx/admin-access.log; + error_log /var/log/nginx/admin-error.log; +} +``` + +You'll need an SSL certificate for this, of course. But you'll also need to +give it access to the `/_synapse/admin` endpoint in Synapse. + +You don't want this endpoint to be available for just anybody on the Internet, +so restrict access to the IP-addresses from which you expect to use +Synapse-Admin. 
+ +In `/etc/nginx/sites-available/synapse` you want to add this bit: + +``` +location ~ ^/_synapse/admin { + allow 127.0.0.1; + allow ::1; + allow 111.222.111.222; + allow dead:beef::/64; + deny all; + + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; +} +``` + +This means access to `/_synapse/admin` is only allowed for the addresses +mentioned, but will be forwarded to Synapse in exactly the same way as +"normal" requests. + + +# LiveKit {#livekit} + +If you run an SFU for Element Call, you need a virtual host for LiveKit. Make +sure you install, configure and run [Element Call LiveKit](../element-call#livekit). +Then create a virtual host much like this: + +``` +server { + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/livekit.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/livekit.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name livekit.example.com; + + # This is lk-jwt-service + location ~ ^(/sfu/get|/healthz) { + proxy_pass http://[::1]:8080; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location / { + proxy_pass http://[::1]:7880; + proxy_set_header Connection "upgrade"; + proxy_set_header Upgrade $http_upgrade; + + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + access_log /var/log/nginx/livekit-access.log; + error_log /var/log/nginx/livekit-error.log; +} +``` + + +# Element Call widget 
{#callwidget} + +If you self-host the [Element Call widget](../element-call#widget), this +should be the configuration to publish that: + +``` +server { + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/call.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/call.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name call.example.com; + + root /var/www/element-call; + + location /assets { + add_header Cache-Control "public, immutable, max-age=31536000"; + } + + location /apple-app-site-association { + default_type application/json; + } + + location = /config.json { + alias /etc/element-call/config.json; + default_type application/json; + } + + location / { + try_files $uri /$uri /index.html; + add_header Cache-Control "public, max-age=30, stale-while-revalidate=30"; + } + + access_log /var/log/nginx/call-access.log; + error_log /var/log/nginx/call-error.log; +} +``` + + +# Firewall + +For normal use, at least ports 80 and 443 must be opened, see [Firewall](../firewall). 
diff --git a/nginx/conf/call.conf b/nginx/conf/call.conf new file mode 100644 index 0000000..2c75bc2 --- /dev/null +++ b/nginx/conf/call.conf @@ -0,0 +1,34 @@ +server { + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/call.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/call.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name call.example.com; + + root /var/www/element-call; + + location /assets { + add_header Cache-Control "public, immutable, max-age=31536000"; + } + + location /apple-app-site-association { + default_type application/json; + } + + location /^config.json$ { + alias public/config.json; + default_type application/json; + } + + location / { + try_files $uri /$uri /index.html; + add_header Cache-Control "public, max-age=30, stale-while-revalidate=30"; + } + + access_log /var/log/nginx/call-access.log; + error_log /var/log/nginx/call-error.log; +} diff --git a/nginx/conf/domain.conf b/nginx/conf/domain.conf new file mode 100644 index 0000000..ba83bd9 --- /dev/null +++ b/nginx/conf/domain.conf @@ -0,0 +1,61 @@ +server { + listen 80; + listen [::]:80; + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name example.com; + + location /.well-known/matrix/client { + return 200 '{ + "m.homeserver": {"base_url": "https://matrix.example.com"}, + "org.matrix.msc3575.proxy": {"url": "https://matrix.example.com"}, + "org.matrix.msc4143.rtc_foci":[ + {"type": "livekit", + "livekit_service_url": "https://livekit.example.com"} + ] + }'; + default_type application/json; + add_header 'Access-Control-Allow-Origin' '*'; + } + + location /.well-known/matrix/server { + return 200 '{"m.server": "matrix.example.com"}'; + 
default_type application/json; + } + + location /.well-known/matrix/support { + return 200 '{ "contacts": + [ + { "email_address": "admin@example.com", + "matrix_id": "@admin:example.com", + "role": "m.role.admin" }, + { "email_address": "security@example.com", + "matrix_id": "@john:example.com", + "role": "m.role.security" } + ], + "support_page": "https://www.example.com/matrix-support" + }'; + default_type application/json; + } + + + location /.well-known/element/element.json { + return 200 '{"call": {"widget_url": "https://call.example.com"}}'; + default_type application/json; + } + + location / { + if ($scheme = http) { + return 301 https://$host$request_uri; + } + } + + access_log /var/log/nginx/example-access.log; + error_log /var/log/nginx/example-error.log; +} diff --git a/nginx/conf/elementweb.conf b/nginx/conf/elementweb.conf new file mode 100644 index 0000000..e89cd83 --- /dev/null +++ b/nginx/conf/elementweb.conf @@ -0,0 +1,29 @@ +server { + listen 80; + listen [::]:80; + listen 443 ssl http2; + listen [::]:443 ssl http2; + + ssl_certificate /etc/letsencrypt/live/element.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/element.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name element.example.com; + + location / { + if ($scheme = http) { + return 301 https://$host$request_uri; + } + add_header X-Frame-Options SAMEORIGIN; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Content-Security-Policy "frame-ancestors 'self'"; + } + + root /usr/share/element-web; + index index.html; + + access_log /var/log/nginx/elementweb-access.log; + error_log /var/log/nginx/elementweb-error.log; +} diff --git a/nginx/conf/livekit.conf b/nginx/conf/livekit.conf new file mode 100644 index 0000000..9f0b3b1 --- /dev/null +++ b/nginx/conf/livekit.conf @@ -0,0 +1,37 @@ +server { + listen 443 ssl; + listen [::]:443 ssl; + + 
ssl_certificate /etc/letsencrypt/live/livekit.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/livekit.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name livekit.example.com; + + # This is lk-jwt-service + location ~ ^(/sfu/get|/healthz) { + proxy_pass http://[::1]:8080; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location / { + proxy_pass http://[::1]:7880; + proxy_set_header Connection "upgrade"; + proxy_set_header Upgrade $http_upgrade; + #add_header Access-Control-Allow-Origin "*" always; + + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + access_log /var/log/nginx/livekit-access.log; + error_log /var/log/nginx/livekit-error.log; +} diff --git a/nginx/conf/revproxy.conf b/nginx/conf/revproxy.conf new file mode 100644 index 0000000..277a436 --- /dev/null +++ b/nginx/conf/revproxy.conf @@ -0,0 +1,85 @@ +server { + listen 443 ssl; + listen [::]:443 ssl; + + # For the federation port + listen 8448 ssl; + listen [::]:8448 ssl; + + ssl_certificate /etc/letsencrypt/live/matrix.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/matrix.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name matrix.example.com; + + # Abuse reports get forwarded to Draupnir, listening on port 8082 + location ~ ^/_matrix/client/(r0|v3)/rooms/([^/]*)/report/(.*)$ { + # The r0 endpoint is deprecated but still used by many clients. + # As of this writing, the v3 endpoint is the up-to-date version. 
+ + # Alias the regexps, to ensure that they're not rewritten. + set $room_id $2; + set $event_id $3; + proxy_pass http://[::1]:8082/api/1/report/$room_id/$event_id; + } + + # Reports that need to reach Synapse (not really sure if this is used) + location /_synapse/admin/v1/event_reports { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + location ~ ^/_synapse/admin/v1/rooms/([^/]*)/context/(.*)$ { + set $room_id $1; + set $event_id $2; + proxy_pass http://localhost:8008/_synapse/admin/v1/rooms/$room_id/context/$event_id; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + + # If you want the server version to be public: + location ~ ^/_synapse/admin/v1/server_version$ { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + + # The rest of the admin endpoint shouldn't be public + location ~ ^/_synapse/admin { + allow 127.0.0.1; + allow ::1; + allow 111.222.111.222; + allow dead:beef::/48; + deny all; + + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + + location ~ ^(/_matrix|/_synapse/client) { + proxy_pass http://localhost:8008; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + client_max_body_size 50M; + proxy_http_version 1.1; + } + + access_log /var/log/nginx/matrix-access.log; + error_log /var/log/nginx/matrix-error.log; +} + diff --git 
a/nginx/conf/synapse-admin.conf b/nginx/conf/synapse-admin.conf new file mode 100644 index 0000000..a1ad40d --- /dev/null +++ b/nginx/conf/synapse-admin.conf @@ -0,0 +1,16 @@ +server { + listen 443 ssl; + listen [::]:443 ssl; + + ssl_certificate /etc/letsencrypt/live/admin.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/admin.example.com/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/ssl/dhparams.pem; + + server_name admin.example.com; + + root /var/www/synapse-admin; + + access_log /var/log/nginx/admin-access.log; + error_log /var/log/nginx/admin-error.log; +} diff --git a/nginx/workers/README.md b/nginx/workers/README.md new file mode 100644 index 0000000..659f376 --- /dev/null +++ b/nginx/workers/README.md @@ -0,0 +1,397 @@ +--- +gitea: none +include_toc: true +--- + +# Reverse proxy for Synapse with workers + +Changing nginx's configuration from a reverse proxy for a normal, monolithic +Synapse to one for a Synapse that uses workers, is a big thing: quite a lot has to +be changed. + +As mentioned in [Synapse with workers](../../synapse/workers/README.md#synapse), +we're changing the "backend" from network sockets to UNIX sockets. + +Because we're going to have to forward a lot of specific requests to all kinds +of workers, we'll split the configuration into a few bits: + +* all `proxy_forward` settings +* all `location` definitions +* maps that define variables +* upstreams that point to the correct socket(s) with the correct settings +* settings for private access +* connection optimizations + +Some of these go into `/etc/nginx/conf.d` because they are part of the +configuration of nginx itself, others go into `/etc/nginx/snippets` because we +need to include them several times in different places. + +**Important consideration** + +This part isn't a quick "put these files in place and you're done": a +worker-based Synapse is tailor-made, there's no one-size-fits-all. 
This +documentation gives hints and examples, but in the end it's you who has to +decide what types of workers to use and how many, all depending on your +specific use case and the available hardware. + + + + +# Optimizations + +In the quest for speed, we are going to tweak several settings in nginx. To +keep things manageable, most of those tweaks go into separate configuration +files that are either automatically included (those under `/etc/nginx/conf.d`) +or explicitly where we need them (those under `/etc/nginx/snippets`). + +Let's start with a few settings that affect nginx as a whole. Edit these +options in `/etc/nginx/nginx.conf`: + +``` +pcre_jit on; +worker_rlimit_nofile 8192; +worker_connections 4096; +multi_accept off; +gzip_comp_level 2; +gzip_types application/javascript application/json application/x-javascript application/xml application/xml+rss image/svg+xml text/css text/javascript text/plain text/xml; +gzip_min_length 1000; +gzip_disable "MSIE [1-6]\."; +``` + +We're going to use lots of regular expressions in our config, `pcre_jit on` +speeds those up considerably. Workers get 8K open files, and we want 4096 +workers instead of the default 768. Workers can only accept one connection, +which is (in almost every case) proxy_forwarded, so we set `multi_accept off`. + +We change `gzip_comp_level` from 6 to 2, we expand the list of content that is +to be gzipped, and don't zip anything shorter than 1000 characters, instead of +the default 20. MSIE can take a hike... + +These are tweaks for the connection, save this in `/etc/ngnix/conf.d/conn_optimize.conf`. 
+ +``` +client_body_buffer_size 32m; +client_header_buffer_size 32k; +client_max_body_size 1g; +http2_max_concurrent_streams 128; +keepalive_timeout 65; +keepalive_requests 100; +large_client_header_buffers 4 16k; +server_names_hash_bucket_size 128; +tcp_nodelay on; +server_tokens off; +``` + +We set a few proxy settings that we use in proxy_forwards other than to our +workers, save this to `conf.d/proxy_optimize.conf`: + +``` +proxy_buffer_size 128k; +proxy_buffers 4 256k; +proxy_busy_buffers_size 256k; +``` + +For every `proxy_forward` to our workers, we want to configure several settings, +and because we don't want to include the same list of settings every time, we put +all of them in one snippet of code, that we can include every time we need it. + +Create `/etc/nginx/snippets/proxy.conf` and put this in it: + +``` +proxy_connect_timeout 2s; +proxy_buffering off; +proxy_http_version 1.1; +proxy_read_timeout 3600s; +proxy_redirect off; +proxy_send_timeout 120s; +proxy_socket_keepalive on; +proxy_ssl_verify off; + +proxy_set_header Accept-Encoding ""; +proxy_set_header Host $host; +proxy_set_header X-Forwarded-For $remote_addr; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Connection $connection_upgrade; +proxy_set_header Upgrade $http_upgrade; + +client_max_body_size 50M; +``` + +Every time we use a `proxy_forward`, we include this snippet. There are 2 more +things we might set: trusted locations that can use the admin endpoints, and a +dedicated DNS-recursor. We include the `snippets/private.conf` in the +forwards to admin endpoints, so that not the entire Internet can play with it. +The dedicated nameserver is something you really want, because synchronising a +large room can easily result in 100.000+ DNS requests. You'll hit flood +protection on most servers if you do that. 
+ +List the addresses from which you want to allow admin access in +`snippets/private.conf`: + +``` +allow 127.0.0.1; +allow ::1; +allow 12.23.45.78; +allow 87.65.43.21; +allow dead:beef::/48; +allow 2a10:1234:abcd::1; +deny all; +satisfy all; +``` + +Of course, replace these random addresses with the ones you trust. The +dedicated nameserver (if you have one, which is strongly recommended) should +be configured in `conf.d/resolver.conf`: + +``` +resolver [::1] 127.0.0.1 valid=60; +resolver_timeout 10s; +``` + + +# Maps {#maps} + +A map sets a variable based on, usually, another variable. One case we use this +is in determining the type of sync a client is doing. A normal sync, simply +updating an existing session, is a rather lightweight operation. An initial sync, +meaning a full sync because the session is brand new, is not so lightweight. + +A normal sync can be recognised by the `since` bit in the request: it tells +the server when its last sync was. If there is no `since`, we're dealing with +an initial sync. + +We want to forward requests for normal syncs to the `normal_sync` workers, and +the initial syncs to the `initial_sync` workers. + +We decide to which type of worker to forward the sync request to by looking at +the presence or absence of `since`: if it's there, it's a normal sync and we +set the variable `$sync` to `normal_sync`. If it's not there, we set `$sync` to +`initial_sync`. The content of `since` is irrelevant for nginx. + +This is what the map looks like: + +``` +map $arg_since $sync { + default normal_sync; + '' initial_sync; +} +``` + +We evaluate `$arg_since` to set `$sync`: `$arg_since` is nginx's variable `$arg_` +followed by `since`, the argument we want. See [the index of +variables in nginx](https://nginx.org/en/docs/varindex.html) for more +variables we can use in nginx. + +By default we set `$sync` to `normal_sync`, unless the argument `since` is +empty (absent); then we set it to `initial_sync`.
+ +After this mapping, we forward the request to the correct worker like this: + +``` +proxy_pass http://$sync; +``` + +See a complete example of maps in the file [maps.conf](maps.conf). + + +# Upstreams + +In our configuration, nginx is not only a reverse proxy, it's also a load balancer. +Just like what `haproxy` does, it can forward requests to "servers" behind it. +Such a server is the inbound UNIX socket of a worker, and there can be several +of them in one group. + +Let's start with a simple one, the `login` worker, that handles the login +process for clients. There's only one worker, so only one socket: + +``` +upstream login { + server unix:/run/matrix-synapse/inbound_login.sock max_fails=0; + keepalive 10; +} +``` + +After this definition, we can forward traffic to `login`. What traffic to +forward is decided in the `location` statements, see further. + +## Synchronisation + +A more complex example is the sync workers. Under [Maps](#Maps) we split sync +requests into two different types; those different types are handled by +different worker pools. In our case we have 2 workers for the initial_sync +requests, and 3 for the normal ones: + +``` +upstream initial_sync { + hash $mxid_localpart consistent; + server unix:/run/matrix-synapse/inbound_initial_sync1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_initial_sync2.sock max_fails=0; + keepalive 10; +} + +upstream normal_sync { + hash $mxid_localpart consistent; + server unix:/run/matrix-synapse/inbound_normal_sync1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_normal_sync2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_normal_sync3.sock max_fails=0; + keepalive 10; +} +``` + +The `hash` bit is to make sure that requests from one user are consistently +forwarded to the same worker. We filled the variable `$mxid_localpart` in the +maps. + +## Federation + +Something similar goes for the federation workers.
Some requests need to go +to the same worker as all the other requests from the same IP-address, others +can go to any of these workers. + +We define two upstreams with the same workers, only with different names and +the explicit IP-address ordering for one: + +``` +upstream incoming_federation { + server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; + keepalive 10; +} + +upstream federation_requests { + hash $remote_addr consistent; + server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; + keepalive 10; +} +``` + +Same workers, different handling. See how we forward requests in the next +paragraph. + +See [upstreams.conf](upstreams.conf) for a complete example. + + +# Locations + +Now that we have defined the workers and/or worker pools, we have to forward +the right traffic to the right workers. The Synapse documentation about +[available worker +types](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications) +lists which endpoints a specific worker type can handle. + +## Login + +Let's forward login requests to our login worker.
The [documentation for the +generic_worker](https://element-hq.github.io/synapse/latest/workers.html#synapseappgeneric_worker) +says these endpoints are for registration and login: + +``` +# Registration/login requests +^/_matrix/client/(api/v1|r0|v3|unstable)/login$ +^/_matrix/client/(r0|v3|unstable)/register$ +^/_matrix/client/(r0|v3|unstable)/register/available$ +^/_matrix/client/v1/register/m.login.registration_token/validity$ +^/_matrix/client/(r0|v3|unstable)/password_policy$ +``` + +We forward that to our worker with this `location` definition, using the +`proxy_forward` settings we defined earlier: + +``` +location ~ ^(/_matrix/client/(api/v1|r0|v3|unstable)/login|/_matrix/client/(r0|v3|unstable)/register|/_matrix/client/(r0|v3|unstable)/register/available|/_matrix/client/v1/register/m.login.registration_token/validity|/_matrix/client/(r0|v3|unstable)/password_policy)$ { + include snippets/proxy.conf; + proxy_pass http://login; +} +``` + +## Synchronisation + +The docs say that the `generic_worker` can handle these requests for synchronisation +requests: + +``` +# Sync requests +^/_matrix/client/(r0|v3)/sync$ +^/_matrix/client/(api/v1|r0|v3)/events$ +^/_matrix/client/(api/v1|r0|v3)/initialSync$ +^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ +``` + +We forward those to our 2 worker pools making sure the heavy initial syncs go +to the `initial_sync` pool, and the normal ones to `normal_sync`. We use the +variable `$sync`for that, which we defined in maps.conf. 
+ +``` +# Normal/initial sync +location ~ ^/_matrix/client/(r0|v3)/sync$ { + include snippets/proxy.conf; + proxy_pass http://$sync; +} + +# Normal sync +location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ { + include snippets/proxy.conf; + proxy_pass http://normal_sync; +} + +# Initial sync +location ~ ^(/_matrix/client/(api/v1|r0|v3)/initialSync|/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync)$ { + include snippets/proxy.conf; + proxy_pass http://initial_sync; +} +``` + +## Media + +The media worker is slightly different: some parts are public, but a few bits +are admin stuff. We split those, and limit the admin endpoints to the trusted +addresses we defined earlier: + +``` +# Media, public +location ~* ^(/_matrix/((client|federation)/[^/]+/)media/|/_matrix/media/v3/upload/) { + include snippets/proxy.conf; + proxy_pass http://media; +} + +# Media, admin +location ~ ^/_synapse/admin/v1/(purge_)?(media(_cache)?|room|user|quarantine_media|users)/[\s\S]+|media$ { + include snippets/private.conf; + include snippets/proxy.conf; + proxy_pass http://media; +} +``` + +# Federation + +Federation is done by two types of workers: one pool for requests from our +server to the rest of the world, and one pool for everything coming in from the +outside world. Only the latter is relevant for nginx. + +The documentation mentions two different types of federation: +* Federation requests +* Inbound federation transaction request + +The second is special, in that requests for that specific endpoint must be +balanced by IP-address. The "normal" federation requests can be sent to any +worker. 
We're sending all these requests to the same workers, but we make sure +to always send requests from 1 IP-address to the same worker: + +``` +# Federation readers +location ~ ^(/_matrix/federation/v1/event/|/_matrix/federation/v1/state/|/_matrix/federation/v1/state_ids/|/_matrix/federation/v1/backfill/|/_matrix/federation/v1/get_missing_events/|/_matrix/federation/v1/publicRooms|/_matrix/federation/v1/query/|/_matrix/federation/v1/make_join/|/_matrix/federation/v1/make_leave/|/_matrix/federation/(v1|v2)/send_join/|/_matrix/federation/(v1|v2)/send_leave/|/_matrix/federation/v1/make_knock/|/_matrix/federation/v1/send_knock/|/_matrix/federation/(v1|v2)/invite/|/_matrix/federation/v1/event_auth/|/_matrix/federation/v1/timestamp_to_event/|/_matrix/federation/v1/exchange_third_party_invite/|/_matrix/federation/v1/user/devices/|/_matrix/key/v2/query|/_matrix/federation/v1/hierarchy/) { + include snippets/proxy.conf; + proxy_pass http://incoming_federation; +} +# Inbound federation transactions +location ~ ^/_matrix/federation/v1/send/ { + include snippets/proxy.conf; + proxy_pass http://federation_requests; +} +``` + diff --git a/nginx/workers/conn_optimizations.conf b/nginx/workers/conn_optimizations.conf new file mode 100644 index 0000000..6822bc2 --- /dev/null +++ b/nginx/workers/conn_optimizations.conf @@ -0,0 +1,13 @@ +# These settings optimize the connection handling. Store this file under /etc/nginx/conf.d, because +# it should be loaded by default. 
+ +client_body_buffer_size 32m; +client_header_buffer_size 32k; +client_max_body_size 1g; +http2_max_concurrent_streams 128; +keepalive_timeout 65; +keepalive_requests 100; +large_client_header_buffers 4 16k; +server_names_hash_bucket_size 128; +tcp_nodelay on; +server_tokens off; diff --git a/nginx/workers/locations.conf b/nginx/workers/locations.conf new file mode 100644 index 0000000..b7adf25 --- /dev/null +++ b/nginx/workers/locations.conf @@ -0,0 +1,111 @@ +# This file describes the forwarding of (almost) every endpoint to a worker or pool of +# workers. This file should go in /etc/nginx/snippets, because we need to load it once, on +# the right place in our site-definition. + +# Account-data +location ~ ^(/_matrix/client/(r0|v3|unstable)/.*/tags|/_matrix/client/(r0|v3|unstable)/.*/account_data) { + include snippets/proxy.conf; + proxy_pass http://account_data; +} + +# Typing +location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing { + include snippets/proxy.conf; + proxy_pass http://typing; +} + +# Receipts +location ~ ^(/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt|/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers) { + include snippets/proxy.conf; + proxy_pass http://receipts; +} + +# Presence +location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ { + include snippets/proxy.conf; + proxy_pass http://presence; +} + +# To device +location ~ ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ { + include snippets/proxy.conf; + proxy_pass http://todevice; +} + +# Push rules +location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ { + include snippets/proxy.conf; + proxy_pass http://push_rules; +} + +# Userdir +location ~ ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ { + include snippets/proxy.conf; + proxy_pass http://userdir; +} + +# Media, users1 +location ~* ^/_matrix/((client|federation)/[^/]+/)media/ { + include snippets/proxy.conf; + proxy_pass http://media; +} +# Media, users2 +location ~* 
^/_matrix/media/v3/upload { + include snippets/proxy.conf; + proxy_pass http://media; +} + +# Media, admin +location ~ ^/_synapse/admin/v1/(purge_)?(media(_cache)?|room|user|quarantine_media|users)/[\s\S]+|media$ { + include snippets/private.conf; + include snippets/proxy.conf; + proxy_pass http://media; +} + +# Login +location ~ ^(/_matrix/client/(api/v1|r0|v3|unstable)/login|/_matrix/client/(r0|v3|unstable)/register|/_matrix/client/(r0|v3|unstable)/register/available|/_matrix/client/v1/register/m.login.registration_token/validity|/_matrix/client/(r0|v3|unstable)/password_policy)$ { + include snippets/proxy.conf; + proxy_pass http://login; +} + +# Normal/initial sync: +# To which upstream to pass the request depends on the map "$sync" +location ~ ^/_matrix/client/(r0|v3)/sync$ { + include snippets/proxy.conf; + proxy_pass http://$sync; +} +# Normal sync: +# These endpoints are used for normal syncs +location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ { + include snippets/proxy.conf; + proxy_pass http://normal_sync; +} +# Initial sync: +# These endpoints are used for initial syncs +location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ { + include snippets/proxy.conf; + proxy_pass http://initial_sync; +} +location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ { + include snippets/proxy.conf; + proxy_pass http://initial_sync; +} + +# Federation +# All the "normal" federation stuff: +location ~ 
^(/_matrix/federation/v1/event/|/_matrix/federation/v1/state/|/_matrix/federation/v1/state_ids/|/_matrix/federation/v1/backfill/|/_matrix/federation/v1/get_missing_events/|/_matrix/federation/v1/publicRooms|/_matrix/federation/v1/query/|/_matrix/federation/v1/make_join/|/_matrix/federation/v1/make_leave/|/_matrix/federation/(v1|v2)/send_join/|/_matrix/federation/(v1|v2)/send_leave/|/_matrix/federation/v1/make_knock/|/_matrix/federation/v1/send_knock/|/_matrix/federation/(v1|v2)/invite/|/_matrix/federation/v1/event_auth/|/_matrix/federation/v1/timestamp_to_event/|/_matrix/federation/v1/exchange_third_party_invite/|/_matrix/federation/v1/user/devices/|/_matrix/key/v2/query|/_matrix/federation/v1/hierarchy/) { + include snippets/proxy.conf; + proxy_pass http://incoming_federation; +} +# Inbound federation transactions: +location ~ ^/_matrix/federation/v1/send/ { + include snippets/proxy.conf; + proxy_pass http://federation_requests; +} + + +# Main thread for all the rest +location / { + include snippets/proxy.conf; + proxy_pass http://inbound_main; +} diff --git a/nginx/workers/maps.conf b/nginx/workers/maps.conf new file mode 100644 index 0000000..376c808 --- /dev/null +++ b/nginx/workers/maps.conf @@ -0,0 +1,55 @@ +# These maps set all kinds of variables we can use later in our configuration. This file +# should be stored under /etc/nginx/conf.d so that it is loaded whenever nginx starts. + +# List of allowed origins, can only send one. +map $http_origin $allow_origin { + ~^https?://element\.example\.com$ $http_origin; + ~^https?://call\.example\.com$ $http_origin; + ~^https?://someserver\.example\.com$ $http_origin; + # NGINX won't set empty string headers, so if no match, header is unset.
+ default ""; +} + +# Client username from MXID +map $http_authorization $mxid_localpart { + default $http_authorization; + "~Bearer syt_(?.*?)_.*" $username; + "" $accesstoken_from_urlparam; +} + +# Whether to upgrade HTTP connection +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +#Extract room name from URI +map $request_uri $room_name { + default "not_room"; + "~^/_matrix/(client|federation)/.*?(?:%21|!)(?[\s\S]+)(?::|%3A)(?[A-Za-z0-9.\-]+)" "!$room:$domain"; +} + +# Choose sync worker based on the existence of "since" query parameter +map $arg_since $sync { + default normal_sync; + '' initial_sync; +} + +# Extract username from access token passed as URL parameter +map $arg_access_token $accesstoken_from_urlparam { + # Defaults to just passing back the whole accesstoken + default $arg_access_token; + # Try to extract username part from accesstoken URL parameter + "~syt_(?.*?)_.*" $username; +} + +# Extract username from access token passed as authorization header +map $http_authorization $mxid_localpart { + # Defaults to just passing back the whole accesstoken + default $http_authorization; + # Try to extract username part from accesstoken header + "~Bearer syt_(?.*?)_.*" $username; + # if no authorization-header exist, try mapper for URL parameter "access_token" + "" $accesstoken_from_urlparam; +} + diff --git a/nginx/workers/private.conf b/nginx/workers/private.conf new file mode 100644 index 0000000..461857a --- /dev/null +++ b/nginx/workers/private.conf @@ -0,0 +1,13 @@ +# This file defines the "safe" IP addresses that are allowed to use the admin endpoints +# of our installation. Store this file under /etc/nginx/snippets, so you can load it on +# demand for the bits you want/need to protect. 
+ +allow 127.0.0.1; +allow ::1; +allow 12.23.45.78; +allow 87.65.43.21; +allow dead:beef::/48; +allow 2a10:1234:abcd::1; +deny all; +satisfy all; + diff --git a/nginx/workers/proxy.conf b/nginx/workers/proxy.conf new file mode 100644 index 0000000..4c3dbc5 --- /dev/null +++ b/nginx/workers/proxy.conf @@ -0,0 +1,8 @@ +# These are a few proxy settings that should be default. These are not used in the proxy_forward to +# our workers, we don't want buffering there. Store this file under /etc/nginx/conf.d because it contains +# defaults. + +proxy_buffer_size 128k; +proxy_buffers 4 256k; +proxy_busy_buffers_size 256k; + diff --git a/nginx/workers/proxy_forward.conf b/nginx/workers/proxy_forward.conf new file mode 100644 index 0000000..95bd3c2 --- /dev/null +++ b/nginx/workers/proxy_forward.conf @@ -0,0 +1,20 @@ +# Settings that we want for every proxy_forward to our workers. This file should live +# under /etc/nginx/snippets, because it should not be loaded automatically but on demand. + +proxy_connect_timeout 2s; +proxy_buffering off; +proxy_http_version 1.1; +proxy_read_timeout 3600s; +proxy_redirect off; +proxy_send_timeout 120s; +proxy_socket_keepalive on; +proxy_ssl_verify off; + +proxy_set_header Accept-Encoding ""; +proxy_set_header Host $host; +proxy_set_header X-Forwarded-For $remote_addr; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Connection $connection_upgrade; +proxy_set_header Upgrade $http_upgrade; + +client_max_body_size 50M; diff --git a/nginx/workers/upstreams.conf b/nginx/workers/upstreams.conf new file mode 100644 index 0000000..a912030 --- /dev/null +++ b/nginx/workers/upstreams.conf @@ -0,0 +1,116 @@ +# Stream workers first, they are special. 
The documentation says: +# "each stream can only have a single writer" + +# Account-data +upstream account_data { + server unix:/run/matrix-synapse/inbound_accountdata.sock max_fails=0; + keepalive 10; +} + +# Userdir +upstream userdir { + server unix:/run/matrix-synapse/inbound_userdir.sock max_fails=0; + keepalive 10; +} + +# Typing +upstream typing { + server unix:/run/matrix-synapse/inbound_typing.sock max_fails=0; + keepalive 10; +} + +# To device +upstream todevice { + server unix:/run/matrix-synapse/inbound_todevice.sock max_fails=0; + keepalive 10; +} + +# Receipts +upstream receipts { + server unix:/run/matrix-synapse/inbound_receipts.sock max_fails=0; + keepalive 10; +} + +# Presence +upstream presence { + server unix:/run/matrix-synapse/inbound_presence.sock max_fails=0; + keepalive 10; +} + +# Push rules +upstream push_rules { + server unix:/run/matrix-synapse/inbound_push_rules.sock max_fails=0; + keepalive 10; +} + +# End of the stream workers, the following workers are of a "normal" type + +# Media +# If more than one media worker is used, they *must* all run on the same machine +upstream media { + server unix:/run/matrix-synapse/inbound_mediaworker.sock max_fails=0; + keepalive 10; +} + +# Synchronisation by clients: + +# Normal sync. 
Not particularly heavy, but happens a lot +upstream normal_sync { + # Use the username mapper result for hash key + hash $mxid_localpart consistent; + server unix:/run/matrix-synapse/inbound_normal_sync1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_normal_sync2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_normal_sync3.sock max_fails=0; + keepalive 10; +} +# Initial sync +# Much heavier than a normal sync, but happens less often +upstream initial_sync { + # Use the username mapper result for hash key + hash $mxid_localpart consistent; + server unix:/run/matrix-synapse/inbound_initial_sync1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_initial_sync2.sock max_fails=0; + keepalive 10; +} + +# Login +upstream login { + server unix:/run/matrix-synapse/inbound_login.sock max_fails=0; + keepalive 10; +} + +# Clients +upstream client { + hash $mxid_localpart consistent; + server unix:/run/matrix-synapse/inbound_clientworker1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_clientworker2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_clientworker3.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_clientworker4.sock max_fails=0; + keepalive 10; +} + +# Federation +# "Normal" federation, balanced round-robin over 4 workers. +upstream incoming_federation { + server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; + keepalive 10; +} +# Inbound federation requests, need to be balanced by IP-address, but can go +# to the same pool of workers as the other federation stuff. 
+upstream federation_requests { + hash $remote_addr consistent; + server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; + server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; + keepalive 10; +} + +# Main thread for all the rest +upstream inbound_main { + server unix:/run/matrix-synapse/inbound_main.sock max_fails=0; + keepalive 10; +} diff --git a/postgresql/README.md b/postgresql/README.md new file mode 100644 index 0000000..a542102 --- /dev/null +++ b/postgresql/README.md @@ -0,0 +1,84 @@ +--- +gitea: none +include_toc: true +--- + +# Installing PostgreSQL and creating database and user + +Installing [PostgreSQL](https://www.postgresql.org/) on Debian is very easy: + +``` +apt install postgresql python3-psycopg2 + +sudo -u postgres bash + +createuser --pwprompt synapse +createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse synapse + +``` + +After this, PostgreSQL is installed, the database `synapse` exists and so does +the database user `synapse`. Make sure you choose a strong password. + + +# Configuring access + +After a clean installation, PostgreSQL will listen on localhost, both IPv4 and +IPv6 (if available). In many cases, this is exactly what you want. + +## Network + +PostgreSQL will listen on localhost, this is configured in +`/etc/postgresql/<version>/main/postgresql.conf`: + +``` +listen_addresses = 'localhost' +``` + +This line is usually commented out, but as it is the default, it's really +there.
+ + +## UNIX socket + +If you want PostgreSQL to listen only to a local UNIX socket (more efficient +than network and -depending on the configuration of the rest of your system- +easier to protect), make the aforementioned option explicitly empty and +uncomment it: + +``` +listen_addresses = '' +``` + +Check these options to make sure the socket is placed in the right spot and +given the correct permissions: + +``` +unix_socket_directories = '/var/run/postgresql' +#unix_socket_group = '' +#unix_socket_permissions = 0777 +``` + + +## Permissions + +Add permission for the user to connect to the database from localhost (if +PostgreSQL listens on localhost), or the socket (if you use that). This is +configured in `/etc/postgresql/<version>/main/pg_hba.conf`: + +``` +local synapse synapse password # for use with UNIX sockets +host synapse synapse localhost md5 # for use with localhost network +``` + +Make sure you add these lines under the one that gives access to the postgres +superuser, the first line. + + +# Tuning {#tuning} + +This is for later, check [Tuning your PostgreSQL Server](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server) +on the PostgreSQL wiki. + +For tuning in the scenario with [Synapse workers](../synapse/workers), see [this +useful site](https://tcpipuk.github.io/postgres/tuning/index.html). diff --git a/synapse-admin/README.md b/synapse-admin/README.md new file mode 100644 index 0000000..7a636e7 --- /dev/null +++ b/synapse-admin/README.md @@ -0,0 +1,33 @@ +# Synapse-admin + +This is the webgui for Synapse. + +Installation can be done in 3 ways +([see Github](https://github.com/Awesome-Technologies/synapse-admin)), we'll +pick the easiest one: using the precompiled tar. + +Unpack it under `/var/www`, link `synapse-admin` to the directory that the +archive creates. This is to make sure you can easily unpack a newer version, +prepare that, and then change the symlink.
+ +``` +# ls -l /var/www +total 8 +drwxr-xr-x 2 root root 4096 Nov 4 18:05 html +lrwxrwxrwx 1 root root 20 Nov 18 13:24 synapse-admin -> synapse-admin-0.10.3 +drwxr-xr-x 5 root root 4096 Nov 18 15:54 synapse-admin-0.10.3 +``` + +We use 0.10.3, but point nginx to '/var/www/synapse-admin'. Configuring nginx +is fairly straightforward, [see here](../nginx/README.md#synapse-admin). + +You should probably restrict Synapse-Admin to your own Synapse-server, instead +of letting users fill in whatever they want. Do this by adding this bit to +`config.json`. In our config we've moved that file to +`/etc/synapse-admin` and link to that from `/var/www/synapse-admin`. + +``` +{ + "restrictBaseUrl": "https://matrix.example.com" +} +``` diff --git a/synapse/README.md b/synapse/README.md new file mode 100644 index 0000000..8658658 --- /dev/null +++ b/synapse/README.md @@ -0,0 +1,646 @@ +--- +gitea: none +include_toc: true +--- + +# Installation and configuration of Synapse + +Mind you: this is an installation on Debian Linux (at least for now). + +Start by installing the latest Synapse server, see the [upstream +documentation](https://element-hq.github.io/synapse/latest/setup/installation.html). + +``` +apt install -y lsb-release wget apt-transport-https build-essential python3-dev libffi-dev \ + python3-pip python3-setuptools sqlite3 \ + libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev git python3-jinja2 + +wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg + +echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | + tee /etc/apt/sources.list.d/matrix-org.list + +apt update +apt install matrix-synapse-py3 +``` + +This leaves a very basic configuration in `/etc/matrix-synapse/homeserver.yaml` +and two settings under `/etc/matrix-synapse/conf.d`. All other configuration items will also +be configured with yaml-files in this directory.
+ +Configure the domain you wish to use in `/etc/matrix-synapse/conf.d/server_name.yaml`. +What you configure here will also be the global part of your Matrix handles +(the part after the colon). Also add the URL clients should connect to: + +``` +server_name: example.com +public_baseurl: https://matrix.example.com/ +``` + +The `public_baseurl` will probably be different than the `server_name`, see +also [Delegation and DNS](#Delegation). + +You now have a standard Matrix server that uses sqlite. You really don't want +to use this in production, so you probably want to replace this with PostgreSQL. + +There are two different ways to configure Synapse, documented here: + +* [Monolithic](monolithic) +* [Workers](workers) + +We'll use Synapse, using the workers architecture to make it scalable, flexible and reusable. + + +# Listeners + +A fresh installation configures one listener, for both client and federation +traffic. This listens on port 8008 on localhost (IPv4 and IPv6) and does not +do TLS: + +``` +listeners: + - port: 8008 + tls: false + type: http + x_forwarded: true + bind_addresses: ['::1', '127.0.0.1'] + resources: + - names: [client, federation] + compress: false +``` + +# Database + +The default installation leaves you with an sqlite3 database. Nice for experimenting, but +unsuitable for a production environment. + +[Here's how you setup PostgreSQL](../postgresql). + +Once you've created a database and user in PostgreSQL, you configure Synapse +to use it. + +First delete (or comment out) the SQLite database in `homeserver.yaml`: + +``` +#database: +# name: sqlite3 +# args: +# database: /var/lib/matrix-synapse/homeserver.db +``` + +Then create the database configuration for PostgreSQL in +`conf.d/database.yaml`: + +``` +database: + name: psycopg2 + args: + user: synapse + password: + dbname: synapse + host: /var/run/postgresql + cp_min: 5 + cp_max: 10 +``` + +Note: you configure the directory where the UNIX socket file lives, not the +actual file.
+ +Of course, if you use localhost, you should configure it like this: + +``` + host: localhost + port: 5432 +``` + +After changing the database, restart Synapse and check whether it can connect +and create the tables it needs. + + +# Create admin + +Synapse doesn't create an admin account at install time, so you'll have to do +that yourself. + +You need to set a `registration_shared_secret` for this, set that in +`conf.d/keys.yaml` like this: + +``` +registration_shared_secret: xxxx +``` + +You can create such a key by running `pwgen -csn 52 1`. Restart Synapse after +setting this key. + +Now create an admin user. Login and issue this command: + +``` +register_new_matrix_user -u admin -a -c /etc/matrix-synapse/conf.d/keys.yaml +``` + +This will ask for a password, choose a safe one. + + +# Logging + +Logging is configured in `log.yaml`. Some logging should go to systemd, the +more specific logging to Synapse's own logfile(s). + +This part is yet to be completed, the default configuration is adequate for +most cases. + +# Delegation and DNS {#Delegation} + +If you run your server under a different FQDN than just the domain name you +want to use, you need to delegate: point from your domain to the server. + +Example. You want to use example.com for your domain, but your server is +called matrix.example.com. To make that work, you need to serve 2 bits of +JSON-code on example.com to point clients and servers to the correct +machine: matrix.example.com. + +Pointing servers to the correct server is done by publishing this bit of +JSON-code under `https://example.com/.well-known/matrix/server`: + +``` +{ + "m.server": "matrix.example.com" +} +``` + +Pointing clients to the correct server needs this at +`https://example.com/.well-known/matrix/client`: + +``` +{ + "m.homeserver": {"base_url": "https://matrix.example.com"} +} +``` + +Very important: both names (example.com and matrix.example.com) must be A +and/or AAAA records in DNS, not CNAME.
+ +You can also publish support data: administrator, security officer, helpdesk +page. Publish that as `.well-known/matrix/support`. + +See the included files for more elaborate examples, and check +[nginx](../nginx) for details about how to publish this data. + + +# E-mail {#Email} + +Synapse should probably be able to send out e-mails; notifications for those +who want that, and password reset for those who need one. + +You configure this under the section `email` (yes, really). + +First of all, you need an SMTP-server that is configured to send e-mail for +your domain. Configuring that is out of scope, we'll assume we can use the +server `smtp.example.com`. + +Configure this in `conf.d/email.yaml`: + +``` +email: + smtp_host: smtp.example.com + smtp_port: 465 + smtp_user: matrix@example.com + smtp_pass: SuperSecretPassword + force_tls: true + notif_from: "Your Matrix server " +``` + +This configures an SMTP-connection with SSL (port 465, `force_tls`). See Matrix' +[email documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=require_transport_security#email) +for more information. + + +# Media store {#mediastore} + +Files and avatars need to be stored somewhere, we configure these options in +`conf.d/mediastore.yaml`: + +``` +media_store_path: /var/lib/matrix-synapse/media +enable_authenticated_media: true +max_upload_size: 50M +url_preview_enabled: true +url_preview_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '192.0.0.0/24' + - '169.254.0.0/16' + - '192.88.99.0/24' + - '198.18.0.0/15' + - '192.0.2.0/24' + - '198.51.100.0/24' + - '203.0.113.0/24' + - '224.0.0.0/4' + - '::1/128' + - 'fe80::/10' + - 'fc00::/7' + - '2001:db8::/32' + - 'ff00::/8' + - 'fec0::/10' +``` + +These are a few sane (?) 
defaults, check [Matrix' documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=media_store_path#media-store) +for many more options. + + +# Homeserver blocking {#blocking} + +This is a series of options that can be used to block and/or limit users. The +whole list of options can be found in [Matrix' documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=mau_stats_only%3A#homeserver-blocking), +we're going to pick out a few useful ones. + +Let's configure these options in `conf.d/homeserver_blocking.yaml`. + +``` +admin_contact: matrixadmin@example.com +mau_stats_only: true +max_avatar_size: 2M +allowed_avatar_mimetypes: + - "image/png" + - "image/jpeg" + - "image/gif" +forgotten_room_retention_period: 7d +``` + + +# Authentication {#authentication} + +Logging in can be done in basically two ways: an internal or external +database. Let's start with the first: users and their passwords are stored in +Synapse's database. + +We use `conf.d/authentication.yaml` to configure this stuff. + +``` +password_config: + policy: + enabled: true + localdb_enabled: true + pepper: + minimum_length: 8 + require_digit: true + require_symbol: true + require_lowercase: true + require_uppercase: true +``` + +With this bit, we configure Synapse to let users pick and change their own +passwords, as long as they meet the configured conditions. Mind you: `pepper` is +a secret random string that should *NEVER* be changed after initial setup. + +But in a bigger environment you'll probably want to use some authentication +backend, such as LDAP. LDAP is configured by means of a module (see +[Synapse LDAP auth Provider](https://github.com/matrix-org/matrix-synapse-ldap3/) +on Github). 
+ +Configuring Synapse to use LDAP, would be something like this: + +``` +password_config: + policy: + enabled: only_for_reauth + localdb_enabled: false + +password_providers: + - module: "ldap_auth_provider.LdapAuthProvider" + config: + enabled: true + uri: "ldap://ldap.example.com:389" + start_tls: true + base: "ou=users,dc=example,dc=com" + attributes: + uid: "uid" + mail: "mail" + name: "cn" + filter: "(&(objectClass=posixAccount)(accountStatus=active))" + + mode: "search" + bind_dn: "cn=matrix,ou=service,dc=example,dc=com" + bind_password: "" +``` + +This would connect to ldap.example.com over TLS, and authenticate users that +live under `ou=users,dc=example,dc=com` and that are active Posix +accounts. Users will not be able to change their passwords via Matrix, they +have to do that in LDAP. + +The bottom 3 lines enable search mode, necessary to find users' displayname +and e-mail address. These values are in LDAP under the attributes "mail" and +"cn" (completely dependent on your LDAP DIT of course, this setup is common +for OpenLDAP). The bind_dn and bind_password are for the account Synapse can +use to connect and search, necessary if anonymous access is prohibited. + + +# Server configuration {#serverconfig} + +See [Define your homeserver name and other base options](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=require_auth_for_profile_requests#server) +in the Synapse documentation. + +It would be logical to put the next options under `conf.d/server.yaml`, but +Debian insists on `conf.d/server_name.yaml` existing and containing the name +of the domain. So we'll use that file for the next options as well. 
Add these +options: + +``` +presence: + enabled: true + include_offline_users_on_sync: false + +require_auth_for_profile_requests: true +allow_public_rooms_over_federation: true + +ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '192.0.0.0/24' + - '169.254.0.0/16' + - '192.88.99.0/24' + - '198.18.0.0/15' + - '192.0.2.0/24' + - '198.51.100.0/24' + - '203.0.113.0/24' + - '224.0.0.0/4' + - '::1/128' + - 'fe80::/10' + - 'fc00::/7' + - '2001:db8::/32' + - 'ff00::/8' + - 'fec0::/10' + +filter_timeline_limit: 500 +delete_stale_devices_after: 1y +``` + +These should be reasonable defaults, but do check the [Server block](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#server) +in Synapse's documentation for more options and information. + + +# Registration {#Registration} + +Registration of new users is configured under `conf.d/registration.yaml`: + +``` +enable_registration: false +enable_registration_without_verification: false +registrations_require_3pid: email +registration_shared_secret: +allow_guest_access: false + +enable_set_displayname: false +enable_3pid_changes: false +``` + +The last two lines prohibit users to change their displayname and 3pid-data +(i.e. e-mail address and phone number). In many cases you'd want them to be +able to set these, of course. But when you use LDAP, which provides these +values, you don't want users to change those. + +See for more options [Synapse's documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#registration). + + +# TURN + +Check for more information about [how to configure the TURN +server](../coturn) or [LiveKit](../element-call#livekit). You probably want +LiveKit, but read on if you choose coturn. 
+ +It might be useful to use both coturn and LiveKit, so as to support both +legacy and EC calls, but you'd need to tweak the configurations so that they +don't bite each other. + +Once you've set up your TURN server, configure it in +Synapse, in `conf.d/turn.yaml`: + +``` +turn_shared_secret: "" +turn_uris: + - "turn:turn.example.com?transport=udp" + - "turn:turn.example.com?transport=tcp" +turn_user_lifetime: 86400000 +turn_allow_guests: true +``` + +Restart Synapse to activate this bit. + + +# Consent Tracking {#consenttracking} + +As administrator you sometimes need to push a message to all your users. See +the [Synapse documentation](https://element-hq.github.io/synapse/latest/server_notices.html) +to see how to configure that. + +It's also necessary for moderation ([see Draupnir](../draupnir)). + + +## Server Notices + +Server notices allow administrators to send messages to users, much like the +`wall` functionality in UNIX/Linux. + +Add this bit of info to `conf.d/server_notices.yaml`: + +``` +server_notices: + system_mxid_localpart: server + system_mxid_display_name: "Server Notices" +# system_mxid_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" + room_name: "Server Notices" +# room_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" + room_topic: "Room used by your server admin to notice you of important information" + auto_join: true +``` + +This means that the user sending the messages (who isn't really a user anyway) +is `@server:example.com`, with the display name `Server Notices`. The room that users receive +these messages in is called the same. The room will be created if it doesn't +yet exist, every user that receives a server message will be put in a room +with that name. + +Every user gets his own room, so if you send a server notice to 100 users, +there will be (at least) 100 rooms by that name, all containing 1 user. 
+ +The option `auto_join` means that users will automatically join the room as +soon as it's created. They can leave afterwards, but they'll be put into it again +as soon as they receive another server message. + +The two commented out options are the avatars for user and room. This is a bit +tricky. You'll need to upload an image to a room first, so that it's present +in the media store. Then you can refer to it by the ID it gets, in the way +shown above. These avatars will only be set or changed when you send a server +notice. + +Important bit: you must upload these pictures to an unencrypted room. Pictures +in an encrypted room are... well... encrypted, and that causes a problem for +the thumbnailer. Pictures in encrypted rooms are stored as MIME type +`application/octet-stream`, you want one of the formats you configured under +[Homeserver Blocking](#blocking). Or, if you haven't defined a whitelist, at +least an image mimetype... + +Apparently this was a bug that's supposed to be fixed in Synapse 1.20, but we +haven't tested that yet. + +You can find the ID of the picture in the database (table `local_media_repository`) +or, more conveniently, in [Synapse-Admin](../synapse-admin), which is also +where you'll want to go if you want to send a server notice. + +In Synapse-Admin, open the User tab, select the user(s) you want to send a +notice to, and click "Send Server Notices". + +If the result is that you're returned to the login screen of Synapse-Admin, +there was an error sending the notice. Check the Synapse logs. + + +## Consent template + +You can force your users to accept an agreement before you let them on your +machine, see the [Synapse Documentation](https://element-hq.github.io/synapse/latest/consent_tracking.html#support-in-synapse-for-tracking-agreement-to-server-terms-and-conditions). 
+ +First, make the directory where you want Synapse to search for the document, +we create the directory `consent_policy`: + + +``` +mkdir -p /var/lib/matrix-synapse/consent_policy/en +``` + +You'll have to add the directory `en` under that, as every document is assumed +to be in English. Support for other languages is on the wish list. + +Create a Jinja2 template with the texts you want: the text users have to agree +to before they can use the service, and the text users that have already +agreed will see. Something like this: + +``` + + + + Example End User Policy + + + {% if has_consented %} +

+ You have already accepted the Example End User Policy. +

+ {% else %} +

Example End User Policy

+ +These are the terms under which you can use this service. Unless you accept these terms, you +will not be allowed to send any messages. + +
    +
  1. You will not be abusive to other users, be they on this server or on an other. +
  2. You will not do other nasty stuff. +
  3. Basically: you will behave like a good person. +
+ +We promise you a few things too: + +
    +
  1. We'll keep your data safe +
  2. We won't snoop on you +
  3. We'll only turn you in with the authorities if you do nasty stuff. +
+ +If you accept these terms, you can use this system. + {% if not public_version %} + +
+ + + + +
+ {% endif %} + {% endif %} + + +``` + +The name of this document needs to be a version name with the extension `.html`. +Say you want your users to accept version 0.1, the file must be named +0.1.html. This version is referred to in the configuration. + +After a user has agreed to this policy, he is presented with `success.html`, +which you will also have to make (although it's not mentioned in the +documentation). This doesn't have to be very complicated. + +``` + + + + Example End User Policy + + +

You have agreed to our End User Policy, you can now use our service.

+ +

Have fun!

+ + +``` + +We now have the texts ready, time to configure Synapse to use it. + +Create a `form_secret`: + +``` +pwgen -csny 30 1 +``` + +Add this bit to `conf.d/server_notices.yaml`: + +``` +form_secret: "" +user_consent: + require_at_registration: true + policy_name: "Example End User Policy" + template_dir: consent_policy + version: + server_notice_content: + msgtype: m.text + body: >- + You have to agree to our End User Policy before you can use this + service. Please read and accept it at %(consent_uri)s. + block_events_error: >- + You haven't accepted the End User Policy yet, so you can't post any + messages yet. Please read and accept the policy at %(consent_uri)s. +``` + +Last bit it to enable the consent tracking on all listeners where `client` is +active. We have only one listener, so we add `consent` to that: + +``` +listeners: + - port: 8008 + tls: false + type: http + x_forwarded: true + bind_addresses: ['::1', '127.0.0.1'] + resources: + - names: + - client + - consent + - federation + compress: false +``` + +Restart Synapse for these changes to take effect. + +If you update your policy, you'll have to copy the current one to a new +version, edit that (e.g. `0.2.html`) and change the `version` to the new +document. Restart Synapse after that. Your users will all have to agree to the +new policy. + +The options `server_notice_content` and `block_events_error` do not seem to be +used, this is something that needs to be investigated. 
diff --git a/synapse/conf.d/authentication.yaml b/synapse/conf.d/authentication.yaml new file mode 100644 index 0000000..47292f9 --- /dev/null +++ b/synapse/conf.d/authentication.yaml @@ -0,0 +1,22 @@ +# Authentication stuff + +password_config: + policy: + enabled: only_for_reauth + localdb_enabled: false + +password_providers: + - module: "ldap_auth_provider.LdapAuthProvider" + config: + enabled: true + uri: "ldap://ldap.example.com" + start_tls: true + mode: "search" + base: "ou=users,o=Example,dc=example,dc=eu" + attributes: + uid: "uid" + mail: "mail" + name: "cn" + filter: "(&(objectClass=posixAccount)(accountStatus=active))" + bind_dn: "cn=matrix,ou=service,dc=example,dc=com" + bind_password: "" diff --git a/synapse/conf.d/call.yaml b/synapse/conf.d/call.yaml new file mode 100644 index 0000000..00e6da4 --- /dev/null +++ b/synapse/conf.d/call.yaml @@ -0,0 +1,19 @@ +experimental_features: + # MSC3266: Room summary API. Used for knocking over federation + msc3266_enabled: true + +# The maximum allowed duration by which sent events can be delayed, as +# per MSC4140. 
+max_event_delay_duration: 24h + +rc_message: + # This needs to match at least the heart-beat frequency plus a bit of headroom + # Currently the heart-beat is every 5 seconds which translates into a rate of 0.2s + per_second: 0.5 + burst_count: 30 + +extra_well_known_client_content: + org.matrix.msc4143.rtc_foci: + type: livekit + livekit_service_url: https://livekit.example.com + diff --git a/synapse/conf.d/database.yaml b/synapse/conf.d/database.yaml new file mode 100644 index 0000000..9e43cd1 --- /dev/null +++ b/synapse/conf.d/database.yaml @@ -0,0 +1,9 @@ +database: + name: psycopg2 + args: + user: synapse + password: + dbname: synapse + host: /var/run/postgresql + cp_min: 5 + cp_max: 10 diff --git a/synapse/conf.d/email.yaml b/synapse/conf.d/email.yaml new file mode 100644 index 0000000..08d5a4d --- /dev/null +++ b/synapse/conf.d/email.yaml @@ -0,0 +1,9 @@ +# This takes care of sending e-mail + +email: + smtp_host: smtp.example.com + smtp_port: 465 + smtp_user: matrix@example.com + smtp_pass: + force_tls: true + notif_from: "Your Matrix server " diff --git a/synapse/conf.d/homeserver_blocking.yaml b/synapse/conf.d/homeserver_blocking.yaml new file mode 100644 index 0000000..b98e4fe --- /dev/null +++ b/synapse/conf.d/homeserver_blocking.yaml @@ -0,0 +1,11 @@ +# Various settings for blocking stuff. 
+# See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=mau_stats_only%3A#homeserver-blocking + +admin_contact: admin@example.com +mau_stats_only: true +max_avatar_size: 2M +allowed_avatar_mimetypes: + - "image/png" + - "image/jpeg" + - "image/gif" +forgotten_room_retention_period: 7d diff --git a/synapse/conf.d/keys.yaml b/synapse/conf.d/keys.yaml new file mode 100644 index 0000000..6be0844 --- /dev/null +++ b/synapse/conf.d/keys.yaml @@ -0,0 +1,5 @@ +# This file contains secrets + +signing_key_path: "/etc/matrix-synapse/homeserver.signing.key" +macaroon_secret_key: +registration_shared_secret: diff --git a/synapse/conf.d/mediastore.yaml b/synapse/conf.d/mediastore.yaml new file mode 100644 index 0000000..80d6592 --- /dev/null +++ b/synapse/conf.d/mediastore.yaml @@ -0,0 +1,29 @@ +# Media stuff +# See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=media_store_path#media-store + +media_store_path: /var/lib/matrix-synapse/media +enable_authenticated_media: true +max_upload_size: 50M +url_preview_enabled: true +url_preview_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '192.0.0.0/24' + - '169.254.0.0/16' + - '192.88.99.0/24' + - '198.18.0.0/15' + - '192.0.2.0/24' + - '198.51.100.0/24' + - '203.0.113.0/24' + - '224.0.0.0/4' + - '::1/128' + - 'fe80::/10' + - 'fc00::/7' + - '2001:db8::/32' + - 'ff00::/8' + - 'fec0::/10' + +dynamic_thumbnails: true diff --git a/synapse/conf.d/report_stats.yaml b/synapse/conf.d/report_stats.yaml new file mode 100644 index 0000000..8e8bc67 --- /dev/null +++ b/synapse/conf.d/report_stats.yaml @@ -0,0 +1,5 @@ +# This file is autogenerated, and will be recreated on upgrade if it is deleted. +# Any changes you make will be preserved. + +# Whether to report homeserver usage statistics. 
+report_stats: true diff --git a/synapse/conf.d/server_name.yaml b/synapse/conf.d/server_name.yaml new file mode 100644 index 0000000..ea7106e --- /dev/null +++ b/synapse/conf.d/server_name.yaml @@ -0,0 +1,43 @@ +# This file is autogenerated, and will be recreated on upgrade if it is deleted. +# Any changes you make will be preserved. + +# The domain name of the server, with optional explicit port. +# This is used by remote servers to connect to this server, +# e.g. matrix.org, localhost:8080, etc. +# This is also the last part of your UserID. +# +server_name: example.com + +# The rest is our local configuration: +public_baseurl: https://matrix.example.com/ + +presence: + enabled: true + include_offline_users_on_sync: false + +require_auth_for_profile_requests: true +allow_public_rooms_over_federation: true + +ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '192.0.0.0/24' + - '169.254.0.0/16' + - '192.88.99.0/24' + - '198.18.0.0/15' + - '192.0.2.0/24' + - '198.51.100.0/24' + - '203.0.113.0/24' + - '224.0.0.0/4' + - '::1/128' + - 'fe80::/10' + - 'fc00::/7' + - '2001:db8::/32' + - 'ff00::/8' + - 'fec0::/10' + +filter_timeline_limit: 500 +delete_stale_devices_after: 1y diff --git a/synapse/conf.d/server_notices.yaml b/synapse/conf.d/server_notices.yaml new file mode 100644 index 0000000..1c8b82a --- /dev/null +++ b/synapse/conf.d/server_notices.yaml @@ -0,0 +1,26 @@ +# Necessary for server notices, and moderation + +server_notices: + system_mxid_localpart: server + system_mxid_display_name: "Server Notices" + system_mxid_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" + room_name: "Server Notices" + room_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" + room_topic: "Room used by your server admin to notice you of important information" + auto_join: true + +user_consent: + require_at_registration: true + policy_name: "Example End User Policy" + template_dir: consent_policy + 
version: 0.2 + server_notice_content: + msgtype: m.text + body: >- + You have to agree to our End User Policy before you can use this + service. Please read and accept it at %(consent_uri)s. + block_events_error: >- + You haven't accepted the End User Policy yet, so you can't post any + messages yet. Please read and accept the policy at %(consent_uri)s. + +form_secret: "" diff --git a/synapse/conf.d/turn.yaml b/synapse/conf.d/turn.yaml new file mode 100644 index 0000000..037dc35 --- /dev/null +++ b/synapse/conf.d/turn.yaml @@ -0,0 +1,9 @@ +# This configures the connection to the TURN server + +turn_shared_secret: "" +turn_uris: + - "turn:turn.example.com?transport=udp" + - "turn:turn.example.com?transport=tcp" +turn_user_lifetime: 86400000 +turn_allow_guests: true + diff --git a/synapse/homeserver.yaml b/synapse/homeserver.yaml new file mode 100644 index 0000000..0df64a0 --- /dev/null +++ b/synapse/homeserver.yaml @@ -0,0 +1,34 @@ +# Configuration file for Synapse. +# +# This is a YAML file: see [1] for a quick introduction. Note in particular +# that *indentation is important*: all the elements of a list or dictionary +# should have the same indentation. +# +# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html +# +# For more information on how to configure Synapse, including a complete accounting of +# each option, go to docs/usage/configuration/config_documentation.md or +# https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html +# +# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations. 
+# server_name: "SERVERNAME" +pid_file: "/var/run/matrix-synapse.pid" +listeners: + - port: 8008 + tls: false + type: http + x_forwarded: true + bind_addresses: ['::1', '127.0.0.1'] + resources: + - names: + - client + - consent + - federation + compress: false +#database: +# name: sqlite3 +# args: +# database: /var/lib/matrix-synapse/homeserver.db +log_config: "/etc/matrix-synapse/log.yaml" +trusted_key_servers: + - server_name: "matrix.org" diff --git a/synapse/templates/0.1.html b/synapse/templates/0.1.html new file mode 100644 index 0000000..e4c2129 --- /dev/null +++ b/synapse/templates/0.1.html @@ -0,0 +1,43 @@ + + + + Example End User Policy + + + {% if has_consented %} +

+ You have already accepted the Example End User Policy. +

+ {% else %} +

Example End User Policy

+ +These are the terms under which you can use this service. Unless you accept these terms, you +will not be allowed to send any messages. + +
    +
  1. You will not be abusive to other users, be they on this server or on an other. +
  2. You will not do other nasty stuff. +
  3. Basically: you will behave like a good person. +
+ +We promise you a few things too: + +
    +
  1. We'll keep your data safe +
  2. We won't snoop on you +
  3. We'll only turn you in with the authorities if you do nasty stuff. +
+ +If you accept these terms, you can use this system. + {% if not public_version %} + +
+ + + + +
+ {% endif %} + {% endif %} + + diff --git a/synapse/templates/success.html b/synapse/templates/success.html new file mode 100644 index 0000000..e1c324c --- /dev/null +++ b/synapse/templates/success.html @@ -0,0 +1,11 @@ + + + + Example End User Policy + + +

You have agreed to our End User Policy, you can now use our service.

+ +

Have fun!

+ + diff --git a/synapse/well-known-client.json b/synapse/well-known-client.json new file mode 100644 index 0000000..28a67db --- /dev/null +++ b/synapse/well-known-client.json @@ -0,0 +1,12 @@ +{ + "m.homeserver": { + "base_url": "https://matrix.example.com" + }, + + "org.matrix.msc4143.rtc_foci":[ + { + "type": "livekit", + "livekit_service_url": "https://livekit.example.com" + } + ] +} diff --git a/synapse/well-known-server.json b/synapse/well-known-server.json new file mode 100644 index 0000000..b9ffd99 --- /dev/null +++ b/synapse/well-known-server.json @@ -0,0 +1 @@ +{"m.server": "matrix.example.com"} diff --git a/synapse/well-known-support.json b/synapse/well-known-support.json new file mode 100644 index 0000000..ef9be1a --- /dev/null +++ b/synapse/well-known-support.json @@ -0,0 +1,17 @@ +{ + "contacts": [ + { + "email_address": "admin@example.com", + "matrix_id": "@john:example.com", + "role": "m.role.admin" + }, + + { + "email_address": "security@example.com", + "matrix_id": "@bob:example.com", + "role": "m.role.security" + } + ], + + "support_page": "https://support.example.com/" +} diff --git a/synapse/workers/README.md b/synapse/workers/README.md new file mode 100644 index 0000000..2f8aebc --- /dev/null +++ b/synapse/workers/README.md @@ -0,0 +1,593 @@ +--- +gitea: none +include_toc: true +--- + +# Introduction to a worker-based setup + +Very busy servers are brought down because a single thread can't keep up with +the load. So you want to create several threads for different types of work. + +See this [Matrix blog](https://matrix.org/blog/2020/11/03/how-we-fixed-synapse-s-scalability/) +for some background information. + +The traditional Synapse setup is one monolithic piece of software that does +everything. Joining a very busy room makes a bottleneck, as the server will +spend all its cycles on synchronizing that room. + +You can split the server into workers, that are basically Synapse servers +themselves. 
Redirect specific tasks to them and you have several different +servers doing all kinds of tasks at the same time. A busy room will no longer +freeze the rest. + +Workers communicate with each other via UNIX sockets and Redis. We choose +UNIX sockets because they're much more efficient than network sockets. Of +course, if you scale to more than one machine, you will need network sockets +instead. + +**Important note** + +While the use of workers can drastically improve speed, the law of diminishing +returns applies. Splitting off more and more workers will not further improve +speed after a certain point. Plus: you need to understand what the most +resource-consuming tasks are before you can start to plan how many workers for +what tasks you need. + +In this document we'll basically create a worker for every task, and several +workers for a few heavy tasks, as an example. Your mileage may not only vary, it +will. + +Tuning the rest of the machine and network also counts, especially PostgreSQL. +A well-tuned PostgreSQL can make a really big difference and should probably +be considered even before configuring workers. + +With workers, PostgreSQL's configuration should be changed accordingly: see +[Tuning PostgreSQL for a Matrix Synapse +server](https://tcpipuk.github.io/postgres/tuning/index.html) for hints and +examples. + +A worker-based Synapse is tailor-made, there is no one-size-fits-all approach. +All we can do here is explain how things work, what to consider and how to +build what you need by providing examples. + + +# Redis + +Workers need Redis as part of their communication, so our first step will be +to install Redis. + +``` +apt install redis-server +``` + +For less overhead we use a UNIX socket instead of a network connection to +localhost. 
Disable the TCP listener and enable the socket in +`/etc/redis/redis.conf`: + +``` +port 0 + +unixsocket /run/redis/redis-server.sock +unixsocketperm 770 +``` + +Our matrix user (`matrix-synapse`) has to be able to read from and write to +that socket, which is created by Redis and owned by `redis:redis`, so we add +user `matrix-synapse` to the group `redis`. You may come up with a +finer-grained permission solution, but for our example this will do. + +``` +adduser matrix-synapse redis +``` + +Restart Redis for these changes to take effect. Check for error messages in +the logs, if port 6379 is no longer active, and if the socketfile +`/run/redis/redis-server.sock` exists. + +Now point Synapse at Redis in `conf.d/redis.yaml`: + +``` +redis: + enabled: true + path: /run/redis/redis-server.sock +``` + +Restart Synapse and check if it can connect to Redis via the socket, you should find log +entries like this: + +``` +synapse.replication.tcp.redis - 292 - INFO - sentinel - Connecting to redis server UNIXAddress('/run/redis/redis-server.sock') +synapse.util.httpresourcetree - 56 - INFO - sentinel - Attaching to path b'/_synapse/replication' +synapse.replication.tcp.redis - 126 - INFO - sentinel - Connected to redis +synapse.replication.tcp.redis - 138 - INFO - subscribe-replication-0 - Sending redis SUBSCRIBE for ['matrix.example.com/USER_IP', 'matrix.example.com'] +synapse.replication.tcp.redis - 141 - INFO - subscribe-replication-0 - Successfully subscribed to redis stream, sending REPLICATE command +synapse.replication.tcp.redis - 146 - INFO - subscribe-replication-0 - REPLICATE successfully sent +``` + + +# Synapse + +Workers communicate with each other over sockets, that are all placed in one +directory. These sockets are owned by `matrix-synapse:matrix-synapse`, so make +sure nginx can write to them: add user `www-data` to group `matrix-synapse` +and restart nginx. 
+ +Then, make sure systemd creates the directory for the sockets as soon as +Synapse starts: + +``` +systemctl edit matrix-synapse +``` + +Now override parts of the `Service` stanza to add these two lines: + +``` +[Service] +RuntimeDirectory=matrix-synapse +RuntimeDirectoryPreserve=yes +``` + +The directory `/run/matrix-synapse` will be created as soon +as Synapse starts, and will not be removed on restart or stop, because that +would create problems with workers who suddenly lose their sockets. + +Then we change Synapse from listening on `localhost:8008` to listening on a +socket. We'll do most of our workers' work in `conf.d/listeners.yaml`, so let's +put the new listener configuration for the main process there. + +Remove the `localhost:8008` stanza, and configure these two sockets: + +``` +listeners: + - path: /run/matrix-synapse/inbound_main.sock + mode: 0660 + type: http + resources: + - names: + - client + - consent + - federation + + - path: /run/matrix-synapse/replication_main.sock + mode: 0660 + type: http + resources: + - names: + - replication +``` + +This means Synapse will create two sockets under `/run/matrix-synapse`: one +for incoming traffic that is forwarded by nginx (`inbound_main.sock`), and one for +communicating with all the other workers (`replication_main.sock`). + +If you restart Synapse now, it won't do anything anymore, because nginx is +still forwarding its traffic to `localhost:8008`. We'll get to nginx later, +but for now you should change: + +``` +proxy_forward http://localhost:8008; +``` + +to + +``` +proxy_forward http://unix:/run/matrix-synapse/inbound_main.sock; +``` + +If you've done this, restart Synapse and nginx, and check if the sockets are created +and have the correct permissions. + +Synapse should work normally again, we've switched from network sockets to +UNIX sockets, and added Redis. Now we'll create the actual workers. 
+ + +# Worker overview + +Every worker is, in fact, a Synapse server, only with a limited set of tasks. +Some tasks can be handled by a number of workers, others only by one. Every +worker starts as a normal Synapse process, reading all the normal +configuration files, and then a bit of configuration for the specific worker +itself. + +Workers need to communicate with each other and the main process, they do that +via the `replication` sockets under `/run/matrix-synapse` and Redis. + +Most workers also need a way to be fed traffic by nginx: they have an `inbound` +socket for that, in the same directory. + +Finally, all those replicating workers need to be registered in the main +process: all workers and their replication sockets are listed in the `instance_map`. + + +## Types of workers + +We'll make separate workers for almost every task, and several for the +heaviest tasks: synchronising. An overview of what endpoints are to be +forwarded to a worker is in [Synapse's documentation](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications). + +We'll create the following workers: + +* login +* federation_sender +* mediaworker +* userdir +* pusher +* push_rules +* typing +* todevice +* accountdata +* presence +* receipts +* initial_sync: 1 and 2 +* normal_sync: 1, 2 and 3 + +Some of them are `stream_writers`, and the [documentation about +stream_writers](https://element-hq.github.io/synapse/latest/workers.html#stream-writers) +says: + +``` +Note: The same worker can handle multiple streams, but unless otherwise documented, each stream can only have a single writer. +``` + +So, stream writers must have unique tasks: you can't have two or more workers +writing to the same stream. 
Stream writers have to be listed in `stream_writers`: + +``` +stream_writers: + account_data: + - accountdata + presence: + - presence + receipts: + - receipts + to_device: + - todevice + typing: + - typing + push_rules: + - push_rules +``` + +As you can see, we've given the stream workers the name of the stream they're +writing to. We could combine all those streams into one worker, which would +probably be enough for most instances. + +We could define a worker with the name streamwriter and list it under all +streams instead of a single worker for every stream. + +Finally, we have to list all these workers under `instance_map`: their name +and their replication socket: + +``` +instance_map: + main: + path: "/run/matrix-synapse/replication_main.sock" + login: + path: "/run/matrix-synapse/replication_login.sock" + federation_sender: + path: "/run/matrix-synapse/replication_federation_sender.sock" + mediaworker: + path: "/run/matrix-synapse/replication_mediaworker.sock" +... + normal_sync1: + path: "unix:/run/matrix-synapse/replication_normal_sync1.sock" + normal_sync2: + path: "unix:/run/matrix-synapse/replication_normal_sync2.sock" + normal_sync3: + path: "unix:/run/matrix-synapse/replication_normal_sync3.sock" +``` + + +## Defining a worker + +Every working starts with the normal configuration files, and then loads its +own. We put those files under `/etc/matrix-synapse/workers`. You have to +create that directory, and make sure Synapse can read them. Being +profesionally paranoid, we restrict access to that directory and the files in +it: + +``` +mkdir /etc/matrix-synapse/workers +chown matrix-synapse:matrix-synapse /etc/matrix-synapse/workers +chmod 750 /etc/matrix-synapse-workers +``` + +We'll fill this directory with `yaml` files; one for each worker. + + +### Generic worker + +Workers look very much the same, very little configuration is needed. 
This is +what you need: + +* name +* replication socket (not every worker needs this) +* inbound socket (not every worker needs this) +* log configuration + +One worker we use handles the login actions, this is how it's configured in +/etc/matrix-synapse/workers/login.yaml`: + +``` +worker_app: "synapse.app.generic_worker" +worker_name: "login" +worker_log_config: "/etc/matrix-synapse/logconf.d/login.yaml" + +worker_listeners: + - path: "/run/matrix-synapse/inbound_login.sock" + type: http + resources: + - names: + - client + - consent + - federation + + - path: "/run/matrix-synapse/replication_login.sock" + type: http + resources: + - names: [replication] +``` + +The first line defines the type of worker. In the past there were quite a few +different types, but most of them have been phased out in favour of one +generic worker. + +The first listener is the socket where nginx sends all traffic related to logins +to. You have to configure nginx to do that, we'll get to that later. + +The `worker_log_config` defines how and where the worker logs. Of course you'll +need to configure that too, see further. + +The first `listener` is the inbound socket, that nginx uses to forward login +related traffic to. Make sure nginx can write to this socket. The +`resources` vary between workers. + +The second `listener` is used for communication with the other workers and the +main thread. The only `resource` it needs is `replication`. This socket needs +to be listed in the `instance_map` in the main thread, the inbound socket does +not. + +Of course, if you need to scale up to the point where you need more than one +machine, these listeners can no longer use UNIX sockets, but will have to use +the network. This creates extra overhead, so you want to use sockets whenever +possible. + + +### Media worker + +The media worker is slightly different than the generic one. It doesn't use the +`synapse.app.generic_worker`, but a specialised one: `synapse.app.media_repository`. 
+To prevent the main process from handling media itself, you have to explicitly +tell it to leave that to the worker, by adding this to the configuration (in +our setup `conf.d/listeners.yaml`): + +``` +enable_media_repo: false +media_instance_running_background_jobs: mediaworker +``` + +The worker `mediaworker` looks like this: + +``` +worker_app: "synapse.app.media_repository" +worker_name: "mediaworker" +worker_log_config: "/etc/matrix-synapse/logconf.d/media.yaml" + +worker_listeners: + - path: "/run/matrix-synapse/inbound_mediaworker.sock" + type: http + resources: + - names: [media] + + - path: "/run/matrix-synapse/replication_mediaworker.sock" + type: http + resources: + - names: [replication] +``` + +If you use more than one mediaworker, know that they must all run on the same +machine; scaling it over more than one machine will not work. + + +## Worker logging + +As stated before, you configure the logging of workers in a separate yaml +file. As with the definitions of the workers themselves, you need a directory for +that. We'll use `/etc/matrix-synapse/logconf.d` for that; make it and fix the +permissions. + +``` +mkdir /etc/matrix-synapse/logconf.d +chgrp matrix-synapse /etc/matrix-synapse/logconf.d +chmod 750 /etc/matrix-synapse/logconf.d +``` + +There's a lot you can configure for logging, but for now we'll give every +worker the same layout. 
Here's the configuration for the `login` worker: + +``` +version: 1 +formatters: + precise: + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +handlers: + file: + class: logging.handlers.TimedRotatingFileHandler + formatter: precise + filename: /var/log/matrix-synapse/login.log + when: midnight + backupCount: 3 + encoding: utf8 + + buffer: + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler + target: file + capacity: 10 + flushLevel: 30 + period: 5 + +loggers: + synapse.metrics: + level: WARN + handlers: [buffer] + synapse.replication.tcp: + level: WARN + handlers: [buffer] + synapse.util.caches.lrucache: + level: WARN + handlers: [buffer] + twisted: + level: WARN + handlers: [buffer] + synapse: + level: INFO + handlers: [buffer] + +root: + level: INFO + handlers: [buffer] +``` + +The only thing you need to change if the filename to which the logs are +written. You could create only one configuration and use that in every worker, +but that would mean all logs will end up in the same file, which is probably +not what you want. + +See the [Python +documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema) +for all the ins and outs of logging. + + +# Systemd + +You want Synapse and its workers managed by systemd. First of all we define a +`target`: a group of services that belong together. + +``` +systemctl edit --force --full matrix-synapse.target +``` + +Feed it with this bit: + +``` +[Unit] +Description=Matrix Synapse with all its workers +After=network.target + +[Install] +WantedBy=multi-user.target +``` + +First add `matrix-synapse.service` to this target by overriding the `WantedBy` +in the unit file. We're overriding and adding a bit more. 
+ +``` +systemctl edit matrix-synapse.service +``` + +Add this to the overrides: + +``` +[Unit] +PartOf=matrix-synapse.target +Before=matrix-synapse-worker +ReloadPropagatedFrom=matrix-synapse.target + +[Service] +RuntimeDirectory=matrix-synapse +RuntimeDirectoryMode=0770 +RuntimeDirectoryPreserve=yes + +[Install] +WantedBy=matrix-synapse.target +``` + +The additions under `Unit` mean that `matrix-synapse.service` is part of the +target we created earlier, and that is should start before the workers. +Restarting the target means this service must be restarted too. + +Under `Service` we define the directory where the sockets live (`/run` is +prefixed automatically), its permissions and that it should not be removed if +the service is stopped. + +The `WantedBy` under `Install` includes it in the target. The target itself is +included in `multi-user.target`, so it should always be started in the multi-user +runlevel. + +For the workers we're using a template instead of separate unit files for every +single one. Create the template: + +``` +systemctl edit --full --force matrix-synapse-worker@ +``` + +Mind the `@` at the end, that's not a typo. Fill it with this content: + +``` +[Unit] +Description=Synapse worker %i +AssertPathExists=/etc/matrix-synapse/workers/%i.yaml + +# This service should be restarted when the synapse target is restarted. +PartOf=matrix-synapse.target +ReloadPropagatedFrom=matrix-synapse.target + +# if this is started at the same time as the main, let the main process start +# first, to initialise the database schema. 
+After=matrix-synapse.service + +[Service] +Type=notify +NotifyAccess=main +User=matrix-synapse +Group=matrix-synapse +WorkingDirectory=/var/lib/matrix-synapse +ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=3 +SyslogIdentifier=matrix-synapse-%i + +[Install] +WantedBy=matrix-synapse.target +``` + +Now you can start/stop/restart every worker individually. Starting the `login` +worker would be done by: + +``` +systemctl start matrix-synapse-worker@login +``` + +Every worker needs to be enabled and started individually. Quickest way to do +that, is to run a loop in the directory: + +``` +cd /etc/matrix-synapse/workers +for worker in `ls *yaml | sed -n 's/\.yaml//p'`; do systemctl enable matrix-synapse-worker@$worker; done +``` + +After a reboot, Synapse and all its workers should be started. But starting +the target should also do that: + +``` +systemctl start matrix-synapse.target +``` + +This should start `matrix-synapse.service` first, the main worker. After that +all the workers should be started too. Check if the correct sockets appear and +if there are any error messages in the logs. + + +# nginx + +We may have a lot of workers, but if nginx doesn't forward traffic to the +correct worker(s), it won't work. We're going to have to change nginx's +configuration quite a bit. + +See [Deploying a Synapse Homeserver with +Docker](https://tcpipuk.github.io/synapse/deployment/nginx.html) for the +inspiration. This details a Docker installation, which we don't have, but the +reasoning behind it applies to our configuration too. + +Here's [how to configure nginx for use with workers](../../nginx/workers). 
diff --git a/synapse/workers/federation_receiver1.yaml b/synapse/workers/federation_receiver1.yaml new file mode 100644 index 0000000..64f394f --- /dev/null +++ b/synapse/workers/federation_receiver1.yaml @@ -0,0 +1,15 @@ +worker_app: "synapse.app.generic_worker" +worker_name: "federation_reader1" +worker_log_config: "/etc/matrix-synapse/logconf.d/federation_reader-log.yaml" + +worker_listeners: + - path: "/run/matrix-synapse/replication_federation_reader1.sock" + type: http + resources: + - names: [replication] + + - path: "/run/matrix-synapse/inbound_federation_reader1.sock" + type: http + resources: + - names: [federation] + diff --git a/synapse/workers/federation_sender1.yaml b/synapse/workers/federation_sender1.yaml new file mode 100644 index 0000000..d2b0399 --- /dev/null +++ b/synapse/workers/federation_sender1.yaml @@ -0,0 +1,10 @@ +worker_app: "synapse.app.generic_worker" +worker_name: "federation_sender1" +worker_log_config: "/etc/matrix-synapse/logconf.d/federation_sender-log.yaml" + +worker_listeners: + - path: "/run/matrix-synapse/replication_federation_sender1.sock" + type: http + resources: + - names: [replication] + diff --git a/synapse/workers/initial_sync1.yaml b/synapse/workers/initial_sync1.yaml new file mode 100644 index 0000000..45d9b85 --- /dev/null +++ b/synapse/workers/initial_sync1.yaml @@ -0,0 +1,19 @@ +worker_app: "synapse.app.generic_worker" +worker_name: "initial_sync1" +worker_log_config: "/etc/matrix-synapse/logconf.d/initial_sync-log.yaml" + +worker_listeners: + + - path: "/run/matrix-synapse/inbound_initial_sync1.sock" + type: http + resources: + - names: + - client + - consent + - federation + + - path: "/run/matrix-synapse/replication_initial_sync1.sock" + type: http + resources: + - names: [replication] + diff --git a/synapse/workers/login-log.yaml b/synapse/workers/login-log.yaml new file mode 100644 index 0000000..7cb5975 --- /dev/null +++ b/synapse/workers/login-log.yaml @@ -0,0 +1,41 @@ +version: 1 +formatters: + precise: + 
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +handlers: + file: + class: logging.handlers.TimedRotatingFileHandler + formatter: precise + filename: /var/log/matrix-synapse/login.log + when: midnight + backupCount: 3 + encoding: utf8 + + buffer: + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler + target: file + capacity: 10 + flushLevel: 30 + period: 5 + +loggers: + synapse.metrics: + level: WARN + handlers: [buffer] + synapse.replication.tcp: + level: WARN + handlers: [buffer] + synapse.util.caches.lrucache: + level: WARN + handlers: [buffer] + twisted: + level: WARN + handlers: [buffer] + synapse: + level: INFO + handlers: [buffer] + +root: + level: INFO + handlers: [buffer] + diff --git a/synapse/workers/login.yaml b/synapse/workers/login.yaml new file mode 100644 index 0000000..c21bd54 --- /dev/null +++ b/synapse/workers/login.yaml @@ -0,0 +1,19 @@ +worker_app: "synapse.app.generic_worker" +worker_name: "login" +worker_log_config: "/etc/matrix-synapse/logconf.d/login-log.yaml" + +worker_listeners: + + - path: "/run/matrix-synapse/inbound_login.sock" + type: http + resources: + - names: + - client + - consent + - federation + + - path: "/run/matrix-synapse/replication_login.sock" + type: http + resources: + - names: [replication] + diff --git a/synapse/workers/media-log.yaml b/synapse/workers/media-log.yaml new file mode 100644 index 0000000..bbddbc1 --- /dev/null +++ b/synapse/workers/media-log.yaml @@ -0,0 +1,41 @@ +version: 1 +formatters: + precise: + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +handlers: + file: + class: logging.handlers.TimedRotatingFileHandler + formatter: precise + filename: /var/log/matrix-synapse/media.log + when: midnight + backupCount: 3 + encoding: utf8 + + buffer: + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler + target: file + capacity: 10 + flushLevel: 30 + period: 5 + +loggers: + synapse.metrics: + 
level: WARN + handlers: [buffer] + synapse.replication.tcp: + level: WARN + handlers: [buffer] + synapse.util.caches.lrucache: + level: WARN + handlers: [buffer] + twisted: + level: WARN + handlers: [buffer] + synapse: + level: INFO + handlers: [buffer] + +root: + level: INFO + handlers: [buffer] + diff --git a/synapse/workers/media.yaml b/synapse/workers/media.yaml new file mode 100644 index 0000000..65b3bf1 --- /dev/null +++ b/synapse/workers/media.yaml @@ -0,0 +1,15 @@ +worker_app: "synapse.app.media_repository" +worker_name: "mediaworker" +worker_log_config: "/etc/matrix-synapse/logconf.d/media-log.yaml" + +worker_listeners: + - path: "/run/matrix-synapse/inbound_mediaworker.sock" + type: http + resources: + - names: [media] + + - path: "/run/matrix-synapse/replication_mediaworker.sock" + type: http + resources: + - names: [replication] +