forked from fediversity/fediversity
		
	Compare commits
	
		
			8 commits
		
	
	
		
			main
			...
			pixelfed-c
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 51e6bc7ca6 | |||
| 8e001c5dc6 | |||
| 1f67866982 | |||
| 54735f1bc5 | |||
| 7645c9e225 | |||
| d988def944 | |||
| ae10086005 | |||
| e888db9f4b | 
					 143 changed files with 999 additions and 6171 deletions
				
			
		
							
								
								
									
										10
									
								
								.envrc
									
										
									
									
									
								
							
							
						
						
									
										10
									
								
								.envrc
									
										
									
									
									
								
							|  | @ -1,10 +0,0 @@ | |||
| #!/usr/bin/env bash | ||||
| # the shebang is ignored, but nice for editors | ||||
| 
 | ||||
| # shellcheck shell=bash | ||||
| if type -P lorri &>/dev/null; then | ||||
|   eval "$(lorri direnv --flake .)" | ||||
| else | ||||
|   echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]' | ||||
|   use flake | ||||
| fi | ||||
|  | @ -15,15 +15,8 @@ jobs: | |||
|       - uses: actions/checkout@v4 | ||||
|       - run: nix build .#checks.x86_64-linux.pre-commit -L | ||||
| 
 | ||||
|   check-website: | ||||
|   check-pixelfed-garage: | ||||
|     runs-on: native | ||||
|     steps: | ||||
|       - uses: actions/checkout@v4 | ||||
|       - run: cd website && nix-build -A tests | ||||
|       - run: cd website && nix-build -A build | ||||
| 
 | ||||
|   check-peertube: | ||||
|     runs-on: native | ||||
|     steps: | ||||
|       - uses: actions/checkout@v4 | ||||
|       - run: nix build .#checks.x86_64-linux.peertube -L | ||||
|       - run: nix build .#checks.x86_64-linux.pixelfed-garage -L | ||||
|  |  | |||
							
								
								
									
										1
									
								
								.gitignore
									
										
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								.gitignore
									
										
									
									
										vendored
									
									
								
							|  | @ -6,6 +6,7 @@ tmp/ | |||
| .proxmox | ||||
| /.pre-commit-config.yaml | ||||
| nixos.qcow2 | ||||
| .envrc | ||||
| .direnv | ||||
| result* | ||||
| .nixos-test-history | ||||
|  |  | |||
|  | @ -15,14 +15,12 @@ details as to what they are for. As an overview: | |||
| - [`infra/`](./infra) contains the configurations for the various VMs that are | ||||
|   in production for the project, for instance the Git instances or the Wiki. | ||||
| 
 | ||||
| - [`keys/`](./keys) contains the public keys of the contributors to this project | ||||
|   as well as the systems that we administrate. | ||||
| 
 | ||||
| - [`matrix/`](./matrix) contains everything having to do with setting up a | ||||
|   fully-featured Matrix server. | ||||
| 
 | ||||
| - [`secrets/`](./secrets) contains the secrets that need to get injected into | ||||
|   machine configurations. | ||||
| - [`server/`](./server) contains the configuration of the VM hosting the | ||||
|   website. This should be integrated into `infra/` shortly in the future, as | ||||
|   tracked in https://git.fediversity.eu/Fediversity/Fediversity/issues/31. | ||||
| 
 | ||||
| - [`services/`](./services) contains our effort to make Fediverse applications | ||||
|   work seemlessly together in our specific setting. | ||||
|  |  | |||
|  | @ -1,7 +1,12 @@ | |||
| { inputs, self, ... }: | ||||
| 
 | ||||
| let | ||||
|   allVmIds = builtins.genList (x: 100 + x) 156; # 100 -- 255 | ||||
|   allVmIds = # 100 -- 255 | ||||
|     let | ||||
|       allVmIdsFrom = x: if x > 255 then [ ] else [ x ] ++ allVmIdsFrom (x + 1); | ||||
|     in | ||||
|     allVmIdsFrom 100; | ||||
| 
 | ||||
|   makeInstaller = import ./makeInstaller.nix; | ||||
| 
 | ||||
| in | ||||
|  |  | |||
|  | @ -16,7 +16,7 @@ in | |||
|   options = { | ||||
|     procolix = { | ||||
|       vmid = mkOption { | ||||
|         type = types.ints.between 100 255; | ||||
|         type = types.int; | ||||
|         description = '' | ||||
|           Identifier of the machine. This is a number between 100 and 255. | ||||
|         ''; | ||||
|  |  | |||
|  | @ -2,32 +2,20 @@ | |||
| 
 | ||||
| * Quick links | ||||
| - Proxmox API doc :: https://pve.proxmox.com/pve-docs/api-viewer | ||||
| - Fediversity Proxmox :: http://192.168.51.81:8006/ | ||||
| - Fediversity Proxmox :: | ||||
|   - http://192.168.51.81:8006/. | ||||
|   - It is only accessible via Procolix's VPN; see with Kevin. | ||||
|   - You will need identifiers. Also see with Kevin. Select “Promox VE authentication server”. | ||||
|   - Ignore “You do not have a valid subscription” message. | ||||
| * Basic terminology | ||||
| - Node :: physical host | ||||
| * Fediversity Proxmox | ||||
| - It is only accessible via Procolix's VPN: | ||||
|   - Get credentials for the VPN portal and Proxmox from [[https://git.fediversity.eu/kevin][Kevin]]. | ||||
|   - Log in to the [[https://vpn.fediversity.eu/vpn-user-portal/home][VPN portal]]. | ||||
|     - Create a *New Configuration*: | ||||
|     - Select *WireGuard (UDP)* | ||||
|     - Enter some name, e.g. ~fediversity~ | ||||
|     - Click Download | ||||
|   - Write the WireGuard configuration to a file ~fediversity-vpn.config~ next to your NixOS configuration | ||||
|     - Add that file's path to ~.git/info/exclude~ and make sure it doesn't otherwise leak (for example, use [[https://github.com/ryantm/agenix][Agenix]] to manage secrets) | ||||
|   - To your NixOS configuration, add | ||||
|     #+begin_src nix | ||||
|     networking.wg-quick.interfaces.fediversity.configFile = toString ./fediversity-vpn.config; | ||||
|     #+end_src | ||||
| - Select “Promox VE authentication server”. | ||||
| - Ignore the “You do not have a valid subscription” message. | ||||
| * Automatically | ||||
| This directory contains scripts that can automatically provision or remove a | ||||
| Proxmox VM. For now, they are tied to one node in the Fediversity Proxmox, but | ||||
| it would not be difficult to make them more generic. Try: | ||||
| #+begin_src sh | ||||
| sh proxmox/provision.sh --help | ||||
| sh proxmox/remove.sh --help | ||||
| sh provision.sh --help | ||||
| sh remove.sh --help | ||||
| #+end_src | ||||
| * Preparing the machine configuration | ||||
| - It is nicer if the machine is a QEMU guest. On NixOS: | ||||
|  | @ -178,11 +178,7 @@ upload_iso () { | |||
| ## Remove ISO | ||||
| 
 | ||||
| remove_iso () { | ||||
|   printf 'Removing ISO for VM %d...\n' $1 | ||||
| 
 | ||||
|   proxmox_sync DELETE $apiurl/nodes/$node/storage/local/content/local:iso/installer-fedi$1.iso | ||||
| 
 | ||||
|   printf 'done removing ISO for VM %d.\n' $1 | ||||
|   printf 'Removing ISO for VM %d... unsupported for now. (FIXME)\n' $1 | ||||
| } | ||||
| 
 | ||||
| ################################################################################ | ||||
|  |  | |||
							
								
								
									
										702
									
								
								flake.lock
									
										
									
										generated
									
									
									
								
							
							
						
						
									
										702
									
								
								flake.lock
									
										
									
										generated
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							|  | @ -3,12 +3,11 @@ | |||
|     nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11"; | ||||
|     flake-parts.url = "github:hercules-ci/flake-parts"; | ||||
|     git-hooks.url = "github:cachix/git-hooks.nix"; | ||||
|     agenix.url = "github:ryantm/agenix"; | ||||
| 
 | ||||
|     disko.url = "github:nix-community/disko"; | ||||
| 
 | ||||
|     nixops4.url = "github:nixops4/nixops4"; | ||||
|     nixops4-nixos.url = "github:nixops4/nixops4-nixos"; | ||||
|     nixops4-nixos.url = "github:nixops4/nixops4/eval"; | ||||
|   }; | ||||
| 
 | ||||
|   outputs = | ||||
|  | @ -23,7 +22,7 @@ | |||
| 
 | ||||
|       imports = [ | ||||
|         inputs.git-hooks.flakeModule | ||||
|         inputs.nixops4.modules.flake.default | ||||
|         inputs.nixops4-nixos.modules.flake.default | ||||
| 
 | ||||
|         ./deployment/flake-part.nix | ||||
|         ./infra/flake-part.nix | ||||
|  | @ -48,10 +47,7 @@ | |||
|               optin = [ | ||||
|                 "deployment" | ||||
|                 "infra" | ||||
|                 "keys" | ||||
|                 "secrets" | ||||
|                 "services" | ||||
|                 "panel" | ||||
|               ]; | ||||
|               files = "^((" + concatStringsSep "|" optin + ")/.*\\.nix|[^/]*\\.nix)$"; | ||||
|             in | ||||
|  | @ -73,7 +69,6 @@ | |||
|           devShells.default = pkgs.mkShell { | ||||
|             packages = [ | ||||
|               pkgs.nil | ||||
|               inputs'.agenix.packages.default | ||||
|               inputs'.nixops4.packages.default | ||||
|             ]; | ||||
|             shellHook = config.pre-commit.installationScript; | ||||
|  |  | |||
|  | @ -1,58 +1,33 @@ | |||
| #+title: Infra | ||||
| 
 | ||||
| This directory contains the definition of the VMs that host our infrastructure. | ||||
| 
 | ||||
| * NixOps4 | ||||
| 
 | ||||
| Their configuration can be updated via NixOps4. Run | ||||
| 
 | ||||
| #+begin_src sh | ||||
| nixops4 deployments list | ||||
| #+end_src | ||||
| 
 | ||||
| to see the available deployments. This should be done from the root of the | ||||
| repository, otherwise NixOps4 will fail with something like: | ||||
| 
 | ||||
| #+begin_src | ||||
| nixops4 error: evaluation: error: | ||||
|        … while calling the 'getFlake' builtin | ||||
| 
 | ||||
|        error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist, evaluation: error: | ||||
|        … while calling the 'getFlake' builtin | ||||
| 
 | ||||
|        error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist | ||||
| #+end_src | ||||
| 
 | ||||
| Then, given a deployment (eg. ~git~), run | ||||
| to see the available deployments. Given a deployment (eg. ~git~), run | ||||
| 
 | ||||
| #+begin_src sh | ||||
| nixops4 apply <deployment> | ||||
| #+end_src | ||||
| 
 | ||||
| Alternatively, to run the ~default~ deployment, run | ||||
| 
 | ||||
| #+begin_src sh | ||||
| nixops4 apply | ||||
| #+end_src | ||||
| 
 | ||||
| * Deployments | ||||
| 
 | ||||
| - default :: Contains everything | ||||
| - ~git~ :: Machines hosting our Git infrastructure, eg. Forgejo and its actions | ||||
|   runners | ||||
| - ~web~ :: Machines hosting our online content, eg. the website or the wiki | ||||
| - ~other~ :: Machines without a specific purpose | ||||
| 
 | ||||
| * Machines | ||||
| * Procolix machines | ||||
| 
 | ||||
| These machines are hosted on the Procolix Proxmox instance, to which | ||||
| non-Procolix members of the project do not have access. They host our stable | ||||
| infrastructure. | ||||
| 
 | ||||
| | Machine | Proxmox     | Description            | Deployment | | ||||
| |---------+-------------+------------------------+------------| | ||||
| | vm02116 | Procolix    | Forgejo                | ~git~      | | ||||
| | vm02179 | Procolix    | /unused/               | ~other~    | | ||||
| | vm02186 | Procolix    | /unused/               | ~other~    | | ||||
| | vm02187 | Procolix    | Wiki                   | ~web~      | | ||||
| | fedi300 | Fediversity | Forgejo actions runner | ~git~      | | ||||
| | Machine | Description            | Deployment | | ||||
| |---------+------------------------+------------| | ||||
| | vm02116 | Forgejo                | ~git~        | | ||||
| | vm02179 | Forgejo actions runner | ~git~        | | ||||
| | vm02186 | Forgejo actions runner | ~git~        | | ||||
| | vm02187 | Wiki                   | ~web~        | | ||||
|  |  | |||
|  | @ -16,13 +16,6 @@ in | |||
|   system.stateVersion = "24.05"; # do not change | ||||
|   nixpkgs.hostPlatform = mkDefault "x86_64-linux"; | ||||
| 
 | ||||
|   ## This is just nice to have, but it is also particularly important for the | ||||
|   ## Forgejo CI runners because the Nix configuration in the actions is directly | ||||
|   ## taken from here. | ||||
|   nix.extraOptions = '' | ||||
|     experimental-features = nix-command flakes | ||||
|   ''; | ||||
| 
 | ||||
|   environment.systemPackages = with pkgs; [ | ||||
|     (pkgs.vim_configurable.customize { | ||||
|       name = "vim"; | ||||
|  | @ -1,10 +1,18 @@ | |||
| { config, lib, ... }: | ||||
| 
 | ||||
| let | ||||
|   inherit (lib) mkDefault; | ||||
|   inherit (lib) mkOption mkDefault; | ||||
| 
 | ||||
| in | ||||
| { | ||||
|   options = { | ||||
|     procolix.vm = { | ||||
|       name = mkOption { }; | ||||
|       ip4 = mkOption { }; | ||||
|       ip6 = mkOption { }; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   config = { | ||||
|     services.openssh = { | ||||
|       enable = true; | ||||
|  | @ -12,8 +20,8 @@ in | |||
|     }; | ||||
| 
 | ||||
|     networking = { | ||||
|       hostName = config.procolixVm.name; | ||||
|       domain = config.procolixVm.domain; | ||||
|       hostName = config.procolix.vm.name; | ||||
|       domain = "procolix.com"; | ||||
| 
 | ||||
|       ## REVIEW: Do we actually need that, considering that we have static IPs? | ||||
|       useDHCP = mkDefault true; | ||||
|  | @ -23,14 +31,16 @@ in | |||
|           ipv4 = { | ||||
|             addresses = [ | ||||
|               { | ||||
|                 inherit (config.procolixVm.ipv4) address prefixLength; | ||||
|                 address = config.procolix.vm.ip4; | ||||
|                 prefixLength = 24; | ||||
|               } | ||||
|             ]; | ||||
|           }; | ||||
|           ipv6 = { | ||||
|             addresses = [ | ||||
|               { | ||||
|                 inherit (config.procolixVm.ipv6) address prefixLength; | ||||
|                 address = config.procolix.vm.ip6; | ||||
|                 prefixLength = 64; | ||||
|               } | ||||
|             ]; | ||||
|           }; | ||||
|  | @ -38,11 +48,11 @@ in | |||
|       }; | ||||
| 
 | ||||
|       defaultGateway = { | ||||
|         address = config.procolixVm.ipv4.gateway; | ||||
|         address = "185.206.232.1"; | ||||
|         interface = "eth0"; | ||||
|       }; | ||||
|       defaultGateway6 = { | ||||
|         address = config.procolixVm.ipv6.gateway; | ||||
|         address = "2a00:51c0:12:1201::1"; | ||||
|         interface = "eth0"; | ||||
|       }; | ||||
| 
 | ||||
|  | @ -1,81 +0,0 @@ | |||
| { lib, ... }: | ||||
| 
 | ||||
| let | ||||
|   inherit (lib) mkOption; | ||||
| 
 | ||||
| in | ||||
| { | ||||
|   options.procolixVm = { | ||||
|     name = mkOption { | ||||
|       description = '' | ||||
|         The name of the machine. Most of the time, this will look like `vm02XXX` | ||||
|         or `fediYYY`. | ||||
|       ''; | ||||
|     }; | ||||
| 
 | ||||
|     domain = mkOption { | ||||
|       description = '' | ||||
|         The domain hosting the machine. Most of the time, this will be either of | ||||
|         `procolix.com`, `fediversity.eu` or `abundos.eu`. | ||||
|       ''; | ||||
|       default = "procolix.com"; | ||||
|     }; | ||||
| 
 | ||||
|     ipv4 = { | ||||
|       address = mkOption { | ||||
|         description = '' | ||||
|           The IP address of the machine, version 4. It will be injected as a | ||||
|           value in `networking.interfaces.eth0`, but it will also be used to | ||||
|           communicate with the machine via NixOps4. | ||||
|         ''; | ||||
|       }; | ||||
| 
 | ||||
|       prefixLength = mkOption { | ||||
|         description = '' | ||||
|           The subnet mask of the interface, specified as the number of bits in | ||||
|           the prefix. | ||||
|         ''; | ||||
|         default = 24; | ||||
|       }; | ||||
| 
 | ||||
|       gateway = mkOption { | ||||
|         description = '' | ||||
|           The IP address of the default gateway. | ||||
|         ''; | ||||
|         default = "185.206.232.1"; # FIXME: compute default from `address` and `prefixLength`. | ||||
|       }; | ||||
|     }; | ||||
| 
 | ||||
|     ipv6 = { | ||||
|       address = mkOption { | ||||
|         description = '' | ||||
|           The IP address of the machine, version 6. It will be injected as a | ||||
|           value in `networking.interfaces.eth0`, but it will also be used to | ||||
|           communicate with the machine via NixOps4. | ||||
|         ''; | ||||
|       }; | ||||
| 
 | ||||
|       prefixLength = mkOption { | ||||
|         description = '' | ||||
|           The subnet mask of the interface, specified as the number of bits in | ||||
|           the prefix. | ||||
|         ''; | ||||
|         default = 64; | ||||
|       }; | ||||
| 
 | ||||
|       gateway = mkOption { | ||||
|         description = '' | ||||
|           The IP address of the default gateway. | ||||
|         ''; | ||||
|         default = "2a00:51c0:12:1201::1"; # FIXME: compute default from `address` and `prefixLength`. | ||||
|       }; | ||||
|     }; | ||||
| 
 | ||||
|     hostPublicKey = mkOption { | ||||
|       description = '' | ||||
|         The host public key of the machine. It is used to filter Age secrets and | ||||
|         only keep the relevant ones, and to feed to NixOps4. | ||||
|       ''; | ||||
|     }; | ||||
|   }; | ||||
| } | ||||
|  | @ -1,57 +0,0 @@ | |||
| { | ||||
|   inputs, | ||||
|   lib, | ||||
|   config, | ||||
|   ... | ||||
| }: | ||||
| 
 | ||||
| let | ||||
|   inherit (lib) attrValues elem; | ||||
|   inherit (lib.attrsets) concatMapAttrs optionalAttrs; | ||||
|   inherit (lib.strings) removeSuffix; | ||||
| 
 | ||||
|   secretsPrefix = ../../secrets; | ||||
|   secrets = import (secretsPrefix + "/secrets.nix"); | ||||
|   keys = import ../../keys; | ||||
|   hostPublicKey = keys.systems.${config.procolixVm.name}; | ||||
| 
 | ||||
| in | ||||
| { | ||||
|   imports = [ ./options.nix ]; | ||||
| 
 | ||||
|   ssh = { | ||||
|     host = config.procolixVm.ipv4.address; | ||||
|     hostPublicKey = hostPublicKey; | ||||
|   }; | ||||
| 
 | ||||
|   nixpkgs = inputs.nixpkgs; | ||||
| 
 | ||||
|   ## The configuration of the machine. We strive to keep in this file only the | ||||
|   ## options that really need to be injected from the resource. Everything else | ||||
|   ## should go into the `./nixos` subdirectory. | ||||
|   nixos.module = { | ||||
|     imports = [ | ||||
|       inputs.agenix.nixosModules.default | ||||
|       ./options.nix | ||||
|       ./nixos | ||||
|     ]; | ||||
| 
 | ||||
|     ## Inject the shared options from the resource's `config` into the NixOS | ||||
|     ## configuration. | ||||
|     procolixVm = config.procolixVm; | ||||
| 
 | ||||
|     ## Read all the secrets, filter the ones that are supposed to be readable | ||||
|     ## with this host's public key, and add them correctly to the configuration | ||||
|     ## as `age.secrets.<name>.file`. | ||||
|     age.secrets = concatMapAttrs ( | ||||
|       name: secret: | ||||
|       optionalAttrs (elem hostPublicKey secret.publicKeys) ({ | ||||
|         ${removeSuffix ".age" name}.file = secretsPrefix + "/${name}"; | ||||
|       }) | ||||
|     ) secrets; | ||||
| 
 | ||||
|     ## FIXME: Remove direct root authentication once the NixOps4 NixOS provider | ||||
|     ## supports users with password-less sudo. | ||||
|     users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors; | ||||
|   }; | ||||
| } | ||||
|  | @ -30,4 +30,11 @@ | |||
|   security.sudo.wheelNeedsPassword = false; | ||||
| 
 | ||||
|   nix.settings.trusted-users = [ "@wheel" ]; | ||||
| 
 | ||||
|   ## FIXME: Remove direct root authentication once NixOps4 supports users with | ||||
|   ## password-less sudo. | ||||
|   users.users.root.openssh.authorizedKeys.keys = [ | ||||
|     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY" | ||||
|     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJg5TlS1NGCRZwMjDgBkXeFUXqooqRlM8fJdBAQ4buPg" | ||||
|   ]; | ||||
| } | ||||
|  | @ -1,34 +0,0 @@ | |||
| { | ||||
|   procolixVm = { | ||||
|     domain = "fediversity.eu"; | ||||
| 
 | ||||
|     ipv4 = { | ||||
|       address = "95.215.187.30"; | ||||
|       gateway = "95.215.187.1"; | ||||
|     }; | ||||
|     ipv6 = { | ||||
|       address = "2a00:51c0:12:1305::30"; | ||||
|       gateway = "2a00:51c0:13:1305::1"; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   nixos.module = { | ||||
|     imports = [ | ||||
|       ./forgejo-actions-runner.nix | ||||
|     ]; | ||||
| 
 | ||||
|     fileSystems."/" = { | ||||
|       device = "/dev/disk/by-uuid/cbcfaf6b-39bd-4328-9f53-dea8a9d32ecc"; | ||||
|       fsType = "ext4"; | ||||
|     }; | ||||
| 
 | ||||
|     fileSystems."/boot" = { | ||||
|       device = "/dev/disk/by-uuid/1A4E-07F4"; | ||||
|       fsType = "vfat"; | ||||
|       options = [ | ||||
|         "fmask=0022" | ||||
|         "dmask=0022" | ||||
|       ]; | ||||
|     }; | ||||
|   }; | ||||
| } | ||||
|  | @ -1,45 +1,75 @@ | |||
| { inputs, ... }: | ||||
| 
 | ||||
| { | ||||
|   inputs, | ||||
|   lib, | ||||
|   ... | ||||
| }: | ||||
| 
 | ||||
| let | ||||
|   inherit (lib) attrValues concatLists mapAttrs; | ||||
|   inherit (lib.attrsets) genAttrs; | ||||
| 
 | ||||
|   addDefaultDeployment = | ||||
|     deployments: deployments // { default = concatLists (attrValues deployments); }; | ||||
| 
 | ||||
|   makeDeployments = mapAttrs ( | ||||
|     _: vmNames: | ||||
|   nixops4Deployments.git = | ||||
|     { providers, ... }: | ||||
|     { | ||||
|       providers.local = inputs.nixops4.modules.nixops4Provider.local; | ||||
|       resources = genAttrs vmNames (vmName: { | ||||
|         _module.args = { inherit inputs; }; | ||||
|         type = providers.local.exec; | ||||
|         imports = [ | ||||
|           inputs.nixops4-nixos.modules.nixops4Resource.nixos | ||||
|           ./common/resource.nix | ||||
|           (./. + "/${vmName}") | ||||
|         ]; | ||||
|         procolixVm.name = vmName; | ||||
|       }); | ||||
|     } | ||||
|   ); | ||||
|       providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local; | ||||
| 
 | ||||
| in | ||||
| { | ||||
|   nixops4Deployments = makeDeployments (addDefaultDeployment { | ||||
|     git = [ | ||||
|       "vm02116" | ||||
|       "fedi300" | ||||
|     ]; | ||||
|     web = [ "vm02187" ]; | ||||
|     other = [ | ||||
|       "vm02179" | ||||
|       "vm02186" | ||||
|     ]; | ||||
|   }); | ||||
|       resources = { | ||||
|         vm02116 = { | ||||
|           type = providers.local.exec; | ||||
|           imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ]; | ||||
|           ssh = { | ||||
|             host = "185.206.232.34"; | ||||
|             opts = ""; | ||||
|             hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILriawl1za2jbxzelkL5v8KPmcvuj7xVBgwFxuM/zhYr"; | ||||
|           }; | ||||
|           nixpkgs = inputs.nixpkgs; | ||||
|           nixos.module = { | ||||
|             imports = [ ./vm02116 ]; | ||||
|           }; | ||||
|         }; | ||||
| 
 | ||||
|         vm02179 = { | ||||
|           type = providers.local.exec; | ||||
|           imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ]; | ||||
|           ssh = { | ||||
|             host = "185.206.232.179"; | ||||
|             opts = ""; | ||||
|             hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPAsOCOsJ0vNL9fGj0XC25ir8B+k2NlVJzsiVUx+0eWM"; | ||||
|           }; | ||||
|           nixpkgs = inputs.nixpkgs; | ||||
|           nixos.module = { | ||||
|             imports = [ ./vm02179 ]; | ||||
|           }; | ||||
|         }; | ||||
| 
 | ||||
|         vm02186 = { | ||||
|           type = providers.local.exec; | ||||
|           imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ]; | ||||
|           ssh = { | ||||
|             host = "185.206.232.186"; | ||||
|             opts = ""; | ||||
|             hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6mnBgEeyYE4tzHeFNHVNBV6KR+hAqh3PYSqlh0QViW"; | ||||
|           }; | ||||
|           nixpkgs = inputs.nixpkgs; | ||||
|           nixos.module = { | ||||
|             imports = [ ./vm02186 ]; | ||||
|           }; | ||||
|         }; | ||||
|       }; | ||||
|     }; | ||||
| 
 | ||||
|   nixops4Deployments.web = | ||||
|     { providers, ... }: | ||||
|     { | ||||
|       providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local; | ||||
| 
 | ||||
|       resources = { | ||||
|         vm02187 = { | ||||
|           type = providers.local.exec; | ||||
|           imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ]; | ||||
|           ssh = { | ||||
|             host = "185.206.232.187"; | ||||
|             opts = ""; | ||||
|             hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN24ZfdQNklKkIqfMg/+0vqENuDcy6fhT6SfAq01ae83"; | ||||
|           }; | ||||
|           nixpkgs = inputs.nixpkgs; | ||||
|           nixos.module = { | ||||
|             imports = [ ./vm02187 ]; | ||||
|           }; | ||||
|         }; | ||||
|       }; | ||||
|     }; | ||||
| } | ||||
|  |  | |||
|  | @ -1,28 +1,28 @@ | |||
| { | ||||
|   procolixVm = { | ||||
|     ipv4.address = "185.206.232.34"; | ||||
|     ipv6.address = "2a00:51c0:12:1201::20"; | ||||
|   imports = [ | ||||
|     ../common | ||||
|     ./forgejo.nix | ||||
|   ]; | ||||
| 
 | ||||
|   procolix.vm = { | ||||
|     name = "vm02116"; | ||||
|     ip4 = "185.206.232.34"; | ||||
|     ip6 = "2a00:51c0:12:1201::20"; | ||||
|   }; | ||||
| 
 | ||||
|   nixos.module = { | ||||
|     imports = [ | ||||
|       ./forgejo.nix | ||||
|     ]; | ||||
|   ## vm02116 is running on old hardware based on a Xen VM environment, so it | ||||
|   ## needs these extra options. Once the VM gets moved to a newer node, these | ||||
|   ## two options can safely be removed. | ||||
|   boot.initrd.availableKernelModules = [ "xen_blkfront" ]; | ||||
|   services.xe-guest-utilities.enable = true; | ||||
| 
 | ||||
|     ## vm02116 is running on old hardware based on a Xen VM environment, so it | ||||
|     ## needs these extra options. Once the VM gets moved to a newer node, these | ||||
|     ## two options can safely be removed. | ||||
|     boot.initrd.availableKernelModules = [ "xen_blkfront" ]; | ||||
|     services.xe-guest-utilities.enable = true; | ||||
|   fileSystems."/" = { | ||||
|     device = "/dev/disk/by-uuid/3802a66d-e31a-4650-86f3-b51b11918853"; | ||||
|     fsType = "ext4"; | ||||
|   }; | ||||
| 
 | ||||
|     fileSystems."/" = { | ||||
|       device = "/dev/disk/by-uuid/3802a66d-e31a-4650-86f3-b51b11918853"; | ||||
|       fsType = "ext4"; | ||||
|     }; | ||||
| 
 | ||||
|     fileSystems."/boot" = { | ||||
|       device = "/dev/disk/by-uuid/2CE2-1173"; | ||||
|       fsType = "vfat"; | ||||
|     }; | ||||
|   fileSystems."/boot" = { | ||||
|     device = "/dev/disk/by-uuid/2CE2-1173"; | ||||
|     fsType = "vfat"; | ||||
|   }; | ||||
| } | ||||
|  |  | |||
|  | @ -1,4 +1,4 @@ | |||
| { config, pkgs, ... }: | ||||
| { pkgs, ... }: | ||||
| let | ||||
|   domain = "git.fediversity.eu"; | ||||
| in | ||||
|  | @ -16,32 +16,22 @@ in | |||
|         HTTP_ADDR = "127.0.0.1"; | ||||
|         LANDING_PAGE = "explore"; | ||||
|       }; | ||||
|       mailer = { | ||||
|         ENABLED = true; | ||||
|         SMTP_ADDR = "mail.protagio.nl"; | ||||
|         SMTP_PORT = "587"; | ||||
|         FROM = "git@fediversity.eu"; | ||||
|         USER = "git@fediversity.eu"; | ||||
|       }; | ||||
|     }; | ||||
| 
 | ||||
|     settings.service.ENABLE_NOTIFY_MAIL = true; | ||||
|     settings.mailer = { | ||||
|       ENABLED = true; | ||||
|       PROTOCOL = "smtp+starttls"; | ||||
|       SMTP_ADDR = "mail.protagio.nl"; | ||||
|       SMTP_PORT = "587"; | ||||
|       FROM = "git@fediversity.eu"; | ||||
|       USER = "git@fediversity.eu"; | ||||
|     }; | ||||
|     secrets.mailer.PASSWD = config.age.secrets.forgejo-email-password.path; | ||||
| 
 | ||||
|     mailerPasswordFile = "/var/lib/forgejo/data/keys/forgejo-mailpw"; | ||||
|     database = { | ||||
|       type = "mysql"; | ||||
|       socket = "/run/mysqld/mysqld.sock"; | ||||
|       passwordFile = config.age.secrets.forgejo-database-password.path; | ||||
|       passwordFile = "/var/lib/forgejo/data/keys/forgejo-dbpassword"; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   age.secrets.forgejo-database-password = { | ||||
|     owner = "forgejo"; | ||||
|     group = "forgejo"; | ||||
|     mode = "440"; | ||||
|   }; | ||||
| 
 | ||||
|   users.groups.keys.members = [ "forgejo" ]; | ||||
| 
 | ||||
|   services.mysql = { | ||||
|  |  | |||
|  | @ -1,22 +1,26 @@ | |||
| { | ||||
|   procolixVm = { | ||||
|     ipv4.address = "185.206.232.179"; | ||||
|     ipv6.address = "2a00:51c0:12:1201::179"; | ||||
|   imports = [ | ||||
|     ../common | ||||
|     ./gitea-runner.nix | ||||
|   ]; | ||||
| 
 | ||||
|   procolix.vm = { | ||||
|     name = "vm02179"; | ||||
|     ip4 = "185.206.232.179"; | ||||
|     ip6 = "2a00:51c0:12:1201::179"; | ||||
|   }; | ||||
| 
 | ||||
|   nixos.module = { | ||||
|     fileSystems."/" = { | ||||
|       device = "/dev/disk/by-uuid/119863f8-55cf-4e2f-ac17-27599a63f241"; | ||||
|       fsType = "ext4"; | ||||
|     }; | ||||
|   fileSystems."/" = { | ||||
|     device = "/dev/disk/by-uuid/119863f8-55cf-4e2f-ac17-27599a63f241"; | ||||
|     fsType = "ext4"; | ||||
|   }; | ||||
| 
 | ||||
|     fileSystems."/boot" = { | ||||
|       device = "/dev/disk/by-uuid/D9F4-9BF0"; | ||||
|       fsType = "vfat"; | ||||
|       options = [ | ||||
|         "fmask=0022" | ||||
|         "dmask=0022" | ||||
|       ]; | ||||
|     }; | ||||
|   fileSystems."/boot" = { | ||||
|     device = "/dev/disk/by-uuid/D9F4-9BF0"; | ||||
|     fsType = "vfat"; | ||||
|     options = [ | ||||
|       "fmask=0022" | ||||
|       "dmask=0022" | ||||
|     ]; | ||||
|   }; | ||||
| } | ||||
|  |  | |||
							
								
								
									
										43
									
								
								infra/vm02179/gitea-runner.nix
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										43
									
								
								infra/vm02179/gitea-runner.nix
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,43 @@ | |||
| { pkgs, ... }: | ||||
| { | ||||
| 
 | ||||
|   virtualisation.docker.enable = true; | ||||
| 
 | ||||
|   services.gitea-actions-runner = { | ||||
|     package = pkgs.forgejo-actions-runner; | ||||
|     instances.default = { | ||||
|       enable = true; | ||||
|       name = "vm02179.procolix.com"; | ||||
|       url = "https://git.fediversity.eu"; | ||||
|       # Obtaining the path to the runner token file may differ | ||||
|       token = "MKmFPY4nxfR4zPYHIRLoiJdrrfkGmcRymj0GWOAk"; | ||||
|       labels = [ | ||||
|         "docker:docker://node:16-bullseye" | ||||
|         "native:host" | ||||
|       ]; | ||||
|       hostPackages = with pkgs; [ | ||||
|         bash | ||||
|         git | ||||
|         nix | ||||
|         nodejs | ||||
|       ]; | ||||
|       settings = { | ||||
|         log.level = "info"; | ||||
|         runner = { | ||||
|           file = ".runner"; | ||||
|           capacity = 8; | ||||
|           timeout = "3h"; | ||||
|           insecure = false; | ||||
|           fetch_timeout = "5s"; | ||||
|           fetch_interval = "2s"; | ||||
|         }; | ||||
|       }; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   ## The Nix configuration of the system influences the Nix configuration | ||||
|   ## in the workflow, and our workflows are often flake-based. | ||||
|   nix.extraOptions = '' | ||||
|     experimental-features = nix-command flakes | ||||
|   ''; | ||||
| } | ||||
							
								
								
									
										1
									
								
								infra/vm02179/token.txt
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								infra/vm02179/token.txt
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1 @@ | |||
| MKmFPY4nxfR4zPYHIRLoiJdrrfkGmcRymj0GWOAk | ||||
|  | @ -1,22 +1,26 @@ | |||
| { | ||||
|   procolixVm = { | ||||
|     ipv4.address = "185.206.232.186"; | ||||
|     ipv6.address = "2a00:51c0:12:1201::186"; | ||||
|   imports = [ | ||||
|     ../common | ||||
|     ./gitea-runner.nix | ||||
|   ]; | ||||
| 
 | ||||
|   procolix.vm = { | ||||
|     name = "vm02186"; | ||||
|     ip4 = "185.206.232.186"; | ||||
|     ip6 = "2a00:51c0:12:1201::186"; | ||||
|   }; | ||||
| 
 | ||||
|   nixos.module = { | ||||
|     fileSystems."/" = { | ||||
|       device = "/dev/disk/by-uuid/833ac0f9-ad8c-45ae-a9bf-5844e378c44a"; | ||||
|       fsType = "ext4"; | ||||
|     }; | ||||
|   fileSystems."/" = { | ||||
|     device = "/dev/disk/by-uuid/833ac0f9-ad8c-45ae-a9bf-5844e378c44a"; | ||||
|     fsType = "ext4"; | ||||
|   }; | ||||
| 
 | ||||
|     fileSystems."/boot" = { | ||||
|       device = "/dev/disk/by-uuid/B4D5-3AF9"; | ||||
|       fsType = "vfat"; | ||||
|       options = [ | ||||
|         "fmask=0022" | ||||
|         "dmask=0022" | ||||
|       ]; | ||||
|     }; | ||||
|   fileSystems."/boot" = { | ||||
|     device = "/dev/disk/by-uuid/B4D5-3AF9"; | ||||
|     fsType = "vfat"; | ||||
|     options = [ | ||||
|       "fmask=0022" | ||||
|       "dmask=0022" | ||||
|     ]; | ||||
|   }; | ||||
| } | ||||
|  |  | |||
|  | @ -9,7 +9,7 @@ | |||
| 
 | ||||
|       name = config.networking.fqdn; | ||||
|       url = "https://git.fediversity.eu"; | ||||
|       tokenFile = config.age.secrets.forgejo-runner-token.path; | ||||
|       token = "MKmFPY4nxfR4zPYHIRLoiJdrrfkGmcRymj0GWOAk"; | ||||
| 
 | ||||
|       settings = { | ||||
|         log.level = "info"; | ||||
|  | @ -29,7 +29,6 @@ | |||
|         "docker:docker://node:16-bullseye" | ||||
|         "native:host" | ||||
|       ]; | ||||
| 
 | ||||
|       hostPackages = with pkgs; [ | ||||
|         bash | ||||
|         git | ||||
|  | @ -41,4 +40,10 @@ | |||
| 
 | ||||
|   ## For the Docker mode of the runner. | ||||
|   virtualisation.docker.enable = true; | ||||
| 
 | ||||
|   ## The Nix configuration of the system influences the Nix configuration | ||||
|   ## in the workflow, and our workflows are often flake-based. | ||||
|   nix.extraOptions = '' | ||||
|     experimental-features = nix-command flakes | ||||
|   ''; | ||||
| } | ||||
|  | @ -1,26 +1,26 @@ | |||
| { | ||||
|   procolixVm = { | ||||
|     ipv4.address = "185.206.232.187"; | ||||
|     ipv6.address = "2a00:51c0:12:1201::187"; | ||||
|   imports = [ | ||||
|     ../common | ||||
|     ./wiki.nix | ||||
|   ]; | ||||
| 
 | ||||
|   procolix.vm = { | ||||
|     name = "vm02187"; | ||||
|     ip4 = "185.206.232.187"; | ||||
|     ip6 = "2a00:51c0:12:1201::187"; | ||||
|   }; | ||||
| 
 | ||||
|   nixos.module = { | ||||
|     imports = [ | ||||
|       ./wiki.nix | ||||
|   fileSystems."/" = { | ||||
|     device = "/dev/disk/by-uuid/a46a9c46-e32b-4216-a4aa-8819b2cd0d49"; | ||||
|     fsType = "ext4"; | ||||
|   }; | ||||
| 
 | ||||
|   fileSystems."/boot" = { | ||||
|     device = "/dev/disk/by-uuid/6AB5-4FA8"; | ||||
|     fsType = "vfat"; | ||||
|     options = [ | ||||
|       "fmask=0022" | ||||
|       "dmask=0022" | ||||
|     ]; | ||||
| 
 | ||||
|     fileSystems."/" = { | ||||
|       device = "/dev/disk/by-uuid/a46a9c46-e32b-4216-a4aa-8819b2cd0d49"; | ||||
|       fsType = "ext4"; | ||||
|     }; | ||||
| 
 | ||||
|     fileSystems."/boot" = { | ||||
|       device = "/dev/disk/by-uuid/6AB5-4FA8"; | ||||
|       fsType = "vfat"; | ||||
|       options = [ | ||||
|         "fmask=0022" | ||||
|         "dmask=0022" | ||||
|       ]; | ||||
|     }; | ||||
|   }; | ||||
| } | ||||
|  |  | |||
|  | @ -1,4 +1,4 @@ | |||
| { config, ... }: | ||||
| { pkgs, ... }: | ||||
| 
 | ||||
| { | ||||
|   services.phpfpm.pools.mediawiki.phpOptions = '' | ||||
|  | @ -11,7 +11,7 @@ | |||
|     name = "Fediversity Wiki"; | ||||
|     webserver = "nginx"; | ||||
|     nginx.hostName = "wiki.fediversity.eu"; | ||||
|     passwordFile = config.age.secrets.wiki-password.path; | ||||
|     passwordFile = pkgs.writeText "password" "eiM9etha8ohmo9Ohphahpesiux0ahda6"; | ||||
|     extraConfig = '' | ||||
|       # Disable anonymous editing | ||||
|       $wgGroupPermissions['*']['edit'] = false; | ||||
|  | @ -24,7 +24,7 @@ | |||
| 
 | ||||
|       ## Permissions | ||||
|       $wgGroupPermissions['*']['edit'] = false; | ||||
|       $wgGroupPermissions['*']['createaccount'] = true; | ||||
|       $wgGroupPermissions['*']['createaccount'] = false; | ||||
|       $wgGroupPermissions['*']['autocreateaccount'] = true; | ||||
|       $wgGroupPermissions['user']['edit'] = true; | ||||
|       $wgGroupPermissions['user']['createaccount'] = true; | ||||
|  | @ -35,19 +35,6 @@ | |||
|       $wgUploadSizeWarning = 1024*1024*512; | ||||
|       $wgMaxUploadSize = 1024*1024*1024; | ||||
| 
 | ||||
|       $wgEnableEmail = true; | ||||
|       $wgPasswordSender = "wiki@fediversity.eu"; | ||||
|       $wgEmergencyContact = "wiki@fediversity.eu"; | ||||
|       $wgSMTP = [ | ||||
|         'host'      => 'mail.protagio.nl', | ||||
|         'IDHost'    => 'fediversity.eu', | ||||
|         'localhost' => 'fediversity.eu', | ||||
|         'port'      => 587, | ||||
|         'auth'      => true, | ||||
|         'username'  => 'wiki@fediversity.eu', | ||||
|       ]; | ||||
|       require_once("${config.age.secrets.wiki-smtp-password.path}"); | ||||
| 
 | ||||
|       $wgHeadScriptCode = <<<'END' | ||||
|       <link rel=me href="https://mastodon.fediversity.eu/@fediversity"> | ||||
|       END; | ||||
|  | @ -58,19 +45,17 @@ | |||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   age.secrets.wiki-smtp-password.owner = "mediawiki"; | ||||
| 
 | ||||
|   services.nginx = { | ||||
|     enable = true; | ||||
|     virtualHosts."wiki.fediversity.eu" = { | ||||
|       basicAuthFile = config.age.secrets.wiki-basicauth-htpasswd.path; | ||||
|       basicAuth = { | ||||
|         fediv = "SecretSauce123!"; | ||||
|       }; | ||||
|       forceSSL = true; | ||||
|       enableACME = true; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
|   age.secrets.wiki-basicauth-htpasswd.owner = "nginx"; | ||||
| 
 | ||||
|   security.acme = { | ||||
|     acceptTerms = true; | ||||
|     defaults.email = "systeemmail@procolix.com"; | ||||
|  |  | |||
|  | @ -1,32 +0,0 @@ | |||
| # Keys | ||||
| 
 | ||||
| This directory contains the SSH public keys of both contributors to the projects | ||||
| and systems that we administrate. Keys are used both for [secrets](../secrets) | ||||
| decryption and [infra](../infra) management. | ||||
| 
 | ||||
| Which private keys can be used to decrypt secrets is defined in | ||||
| [`secrets.nix`](../secrets/secrets.nix) as _all the contributors_ as well as the | ||||
| specific systems that need access to the secret in question. Adding a | ||||
| contributor of system's key to a secret requires rekeying the secret, which can | ||||
| only be done by some key that had already access to it. (Alternatively, one can | ||||
| overwrite a secret without knowing its contents.) | ||||
| 
 | ||||
| In infra management, the systems' keys are used for security reasons; they | ||||
| identify the machine that we are talking to. The contributor keys are used to | ||||
| give access to the `root` user on these machines, which allows, among other | ||||
| things, to deploy their configurations with NixOps4. | ||||
| 
 | ||||
| ## Adding a contributor | ||||
| 
 | ||||
| Adding a contributor consists of three steps: | ||||
| 
 | ||||
| 1. The contributor in question adds a file with their key to the | ||||
|    `./contributors` directory, and opens a pull request with it. | ||||
| 
 | ||||
| 2. An already-existing contributor uses their keys to [re-key the secrets](../secrets#adding-a-contributor), taking that new key into | ||||
|    account. | ||||
| 
 | ||||
| 3. An already-existing contributor redeploys the [infrastructure](../infra) to take into | ||||
|    account the new access. | ||||
| 
 | ||||
| 4. The pull request is accepted and merged. | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINpebsCsP+GUMZ2SeVKsuDMwLTQ8H1Ny3oVgf73jsgMg hedgehog 2025 | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2 kiara@procolix.eu | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY niols@wallace | ||||
|  | @ -1,32 +0,0 @@ | |||
| let | ||||
|   inherit (builtins) | ||||
|     attrValues | ||||
|     elemAt | ||||
|     foldl' | ||||
|     mapAttrs | ||||
|     match | ||||
|     readDir | ||||
|     readFile | ||||
|     ; | ||||
|   ## `mergeAttrs` and `concatMapAttrs` are in `lib.trivial` and `lib.attrsets`, | ||||
|   ## but we would rather avoid a dependency in nixpkgs for this file. | ||||
|   mergeAttrs = x: y: x // y; | ||||
|   concatMapAttrs = f: v: foldl' mergeAttrs { } (attrValues (mapAttrs f v)); | ||||
|   removePubSuffix = | ||||
|     s: | ||||
|     let | ||||
|       maybeMatch = match "(.*)\.pub" s; | ||||
|     in | ||||
|     if maybeMatch == null then s else elemAt maybeMatch 0; | ||||
|   removeTrailingWhitespace = s: elemAt (match "(.*[^[:space:]])[[:space:]]*" s) 0; | ||||
| 
 | ||||
|   collectKeys = | ||||
|     dir: | ||||
|     concatMapAttrs (name: _: { | ||||
|       "${removePubSuffix name}" = removeTrailingWhitespace (readFile (dir + "/${name}")); | ||||
|     }) (readDir dir); | ||||
| in | ||||
| { | ||||
|   contributors = collectKeys ./contributors; | ||||
|   systems = collectKeys ./systems; | ||||
| } | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGFH/Kvye5It8FojdjpsuyZQiU0kxj2wq7Zq/+61vxNn | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILriawl1za2jbxzelkL5v8KPmcvuj7xVBgwFxuM/zhYr | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOrmZ9eMPLDSiayphFhPi7vry5P2VlEr7BvIjtnpN7Td | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPAsOCOsJ0vNL9fGj0XC25ir8B+k2NlVJzsiVUx+0eWM | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6mnBgEeyYE4tzHeFNHVNBV6KR+hAqh3PYSqlh0QViW | ||||
|  | @ -1 +0,0 @@ | |||
| ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN24ZfdQNklKkIqfMg/+0vqENuDcy6fhT6SfAq01ae83 | ||||
|  | @ -5,13 +5,10 @@ include_toc: true | |||
| 
 | ||||
| # A complete Matrix installation | ||||
| 
 | ||||
| This documentation describes how to build a complete Matrix environment with | ||||
| all bells and whistles. Not just the Synapse server, but (almost) every bit | ||||
| you want. | ||||
| 
 | ||||
| The main focus will be on the server itself, Synapse, but there's a lot more | ||||
| than just that. | ||||
| This is going to be a Matrix installation with all bells and whistles. Not | ||||
| just the server, but every other bit that you need or want. | ||||
| 
 | ||||
| We're building it with workers, so it will scale. | ||||
| 
 | ||||
| ## Overview | ||||
| 
 | ||||
|  | @ -27,93 +24,28 @@ conferencing | |||
| * [Consent | ||||
| tracking](https://element-hq.github.io/synapse/latest/consent_tracking.html) | ||||
| * Authentication via | ||||
| [OpenID](https://element-hq.github.io/synapse/latest/openid.html) (later) | ||||
| * Several [bridges](https://matrix.org/ecosystem/bridges/) (later) | ||||
| [OpenID](https://element-hq.github.io/synapse/latest/openid.html) | ||||
| * Several [bridges](https://matrix.org/ecosystem/bridges/) | ||||
| 
 | ||||
| 
 | ||||
| # Overview | ||||
| # Synapse | ||||
| 
 | ||||
| This documentation aims to describe the installation of a complete Matrix | ||||
| platform, with all bells and whistles. Several components are involved and | ||||
| finishing the installation of one can be necessary for the installation of the | ||||
| next. | ||||
| This is the core component: the Matrix server itself. | ||||
| 
 | ||||
| Before you start, make sure you take a look at the [checklist](checklist.md). | ||||
| 
 | ||||
| These are the components we're going to use: | ||||
| 
 | ||||
| 
 | ||||
| ## Synapse | ||||
| 
 | ||||
| This is the core component: the Matrix server itself, you should probably | ||||
| install this first.  | ||||
| 
 | ||||
| Because not every usecase is the same, we'll describe two different | ||||
| architectures: | ||||
| 
 | ||||
| ** [Monolithic](synapse) | ||||
| 
 | ||||
| This is the default way of installing Synapse, this is suitable for scenarios | ||||
| with not too many users, and, importantly, users do not join many very crowded | ||||
| rooms. | ||||
| 
 | ||||
| ** [Worker-based](synapse/workers) | ||||
| 
 | ||||
| For servers that get a bigger load, for example those that host users that use | ||||
| many big rooms, we'll describe how to process that higher load by distributing | ||||
| it over workers. | ||||
| 
 | ||||
| 
 | ||||
| ## PostgreSQL | ||||
| 
 | ||||
| This is the database Synapse uses. This should be the first thing you install | ||||
| after Synapse, and once you're done, reconfigure the default Synapse install | ||||
| to use PostgreSQL. | ||||
| 
 | ||||
| If you have already added stuff to the SQLite database that Synapse installs | ||||
| by default that you don't want to lose: [here's how to migrate from SQLite to | ||||
| PostgreSQL](https://element-hq.github.io/synapse/latest/postgres.html#porting-from-sqlite). | ||||
| 
 | ||||
| 
 | ||||
| ## nginx | ||||
| 
 | ||||
| We need a webserver for several things, see how to [configure nginx](nginx) | ||||
| here. | ||||
| 
 | ||||
| If you install this, make sure to check which certificates you need, fix the | ||||
| DNS entries and probably keep TTL for for those entries very low until after | ||||
| the installation, when you know everything's working. | ||||
| 
 | ||||
| 
 | ||||
| ## Element Call | ||||
| 
 | ||||
| Element Call is the new way to have audio and video conferences, both | ||||
| one-on-one and with groups. This does not use Jitsi and keeps E2EE intact. See | ||||
| how to [setup and configure it](element-call). | ||||
| 
 | ||||
| 
 | ||||
| # Element Web | ||||
| 
 | ||||
| This is the fully-fledged web client, which is very [easy to set | ||||
| up](element-web). | ||||
| Installation and configuration is documented under [synapse](synapse). | ||||
| 
 | ||||
| 
 | ||||
| # TURN | ||||
| 
 | ||||
| We may need a TURN server, and we'll use | ||||
| [coturn](coturn) for that. | ||||
| [coturn](https://github.com/coturn/coturn) for that. | ||||
| 
 | ||||
| It's apparently also possible to use the built-in TURN server in Livekit, | ||||
| which we'll use if we use [Element Call](element-call). It's either/or, so make | ||||
| sure you pick the right approach. | ||||
| 
 | ||||
| You could possibly use both coturn and LiveKit, if you insist on being able to | ||||
| use both legacy and Element Call functionality. This is not documented here | ||||
| yet. | ||||
| which we'll use if we use [Element Call](call). It's either/or, so make sure | ||||
| you pick the right approach. | ||||
| 
 | ||||
| 
 | ||||
| # Draupnir | ||||
| # Wiki | ||||
| 
 | ||||
| With Draupnir you can do moderation. It requires a few changes to both Synapse | ||||
| and nginx, here's how to [install and configure Draupnir](draupnir). | ||||
| Of course there's a wiki in this repository. | ||||
| 
 | ||||
|  |  | |||
							
								
								
									
										16
									
								
								matrix/call/README.md
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										16
									
								
								matrix/call/README.md
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,16 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Element Call | ||||
| 
 | ||||
| Element Call enables users to have audio and videocalls with groups, while | ||||
| maintaining full E2E encryption. | ||||
| 
 | ||||
| It requires several bits of software and entries in .well-known/matrix/client | ||||
| 
 | ||||
| This bit is for later, but here's a nice bit of documentation to start: | ||||
| 
 | ||||
| https://sspaeth.de/2024/11/sfu/ | ||||
| 
 | ||||
|  | @ -1,97 +0,0 @@ | |||
| # Checklist | ||||
| 
 | ||||
| Before you dive in and start installing, you should do a little planning | ||||
| ahead. Ask yourself what you expect from your server. | ||||
| 
 | ||||
| Is it a small server, just for yourself and some friends and family, or for | ||||
| your hundreds of colleagues at work? Is it for private use, or do you need | ||||
| decent moderation tools? Do you need audio and videoconferencing or not? | ||||
| 
 | ||||
| 
 | ||||
| # Requirements | ||||
| 
 | ||||
| It's difficult to specify hardware requirements upfront, because they don't | ||||
| really depend on the number of users you have, but on their behaviour. A | ||||
| server with users who don't engage in busy rooms like | ||||
| [#matrix:matrix.org](https://matrix.to/#/#matrix:matrix.org) doesn't need more | ||||
| than 2 CPU cores, 8GB of RAM and 50GB of diskspace. | ||||
| 
 | ||||
| A server with users who do join very busy rooms, can easily eat 4 cores and | ||||
| 16GB of RAM. Or more. Or even much more. If you have a public server, where | ||||
| unknown people can register new accounts, you'll probably need a bit more | ||||
| oompf (and [moderation](draupnir)). | ||||
| 
 | ||||
| During its life, the server may need more resources, if users change | ||||
| their behaviour. Or less. There's no one-size-fits-all approach. | ||||
| 
 | ||||
| If you have no idea, you should probably start with 2 cores, 8GB RAM and some | ||||
| 50GB diskspace, and follow the [monolithic setup](synapse). | ||||
| 
 | ||||
| If you expect a higher load (you might get there sooner than you think), you | ||||
| should probably follow the [worker-based setup](synapse/workers), because | ||||
| changing the architecture from monolithic to worker-based once the server is | ||||
| already in use, is a tricky task. | ||||
| 
 | ||||
| Here's a ballpark figure. Remember, your mileage will probably vary. And | ||||
| remember, just adding RAM and CPU doesn't automatically scale: you'll need to | ||||
| tune [PostgreSQL](postgresql/README.md#tuning) and your workers as well so | ||||
| that your hardware is optimally used. | ||||
| 
 | ||||
| |  Scenario                             | Architecture                    | CPU    | RAM    | Diskspace (GB) | | ||||
| | :------------------------------------ | :-----------------------------: | :----: | :----: | :------------: | | ||||
| | Personal, not many very busy rooms    | [monolithic](synapse)           | 2      | 8GB    | 50             | | ||||
| | Private, users join very busy rooms   | [worker-based](synapse/workers) | 4      | 16GB   | 100            | | ||||
| | Public, many users in very busy rooms | [worker-based](synapse/workers) | 8      | 32GB   | 250            | | ||||
| 
 | ||||
| 
 | ||||
| # DNS and certificates | ||||
| 
 | ||||
| You'll need to configure several things in DNS, and you're going to need a | ||||
| couple of TLS-certificates. Best to configure those DNS entries first, so that | ||||
| you can quickly generate the certificates once you're there. | ||||
| 
 | ||||
| It's usually a good idea to keep the TTL of all these records very low while | ||||
| installing and configuring, so that you can quickly change records without | ||||
| having to wait for the TTL to expire. Setting a TTL of 300 (5 minutes) should | ||||
| be fine. Once everything is in place and working, you should probably increase | ||||
| it to a more production ready value, like 3600 (1 hour) or more. | ||||
| 
 | ||||
| What do you need? Well, first of all you need a domain. In this documentation | ||||
| we'll use `example.com`, you'll need to substitute that with your own domain. | ||||
| 
 | ||||
| Under the top of that domain, you'll need to host 2 files under | ||||
| `/.well-known`, so you'll need a webserver there, using a valid | ||||
| TLS-certificate. This doesn't have to be the same machine as the one you're | ||||
| installing Synapse on. In fact, it usually isn't. | ||||
| 
 | ||||
| Assuming you're hosting Matrix on the machine `matrix.example.com`, you need | ||||
| at least an `A` record in DNS, and -if you have IPv6 support, which you | ||||
| should- an `AAAA` record too. **YOU CAN NOT USE A CNAME FOR THIS RECORD!** | ||||
| You'll need a valid TLS-certificate for `matrix.example.com` too. | ||||
| 
 | ||||
| You'll probably want the webclient too, so that users aren't forced to use an | ||||
| app on their phone or install the desktop client on their PC. You should never | ||||
| run the web client on the same name as the server, that opens you up for all | ||||
| kinds of Cross-Site-Scripting attack. We'll assume you use | ||||
| `element.example.com` for the web client. You need a DNS entry for that. This | ||||
| can be a CNAME, but make sure you have a TLS-certificate with the correct name | ||||
| on it. | ||||
| 
 | ||||
| If you install a [TURN-server](coturn), either for legacy calls or for [Element | ||||
| Call](element-call) (or both), you need a DNS entry for that too, and -again- a | ||||
| TLS-certificate. We'll use `turn.example.com` for this. | ||||
| 
 | ||||
| If you install Element Call (and why shouldn't you?), you need a DNS entry plus | ||||
| certificate for that, let's assume you use `call.example.com` for that. This | ||||
| can be a CNAME again. Element Call uses [LiveKit](element-call#livekit) for the | ||||
| actual processing of audio and video, and that needs its own DNS entry and certificate | ||||
| too. We'll use `livekit.example.com`. | ||||
| 
 | ||||
| | FQDN                  | Use                    | Comment                                  | | ||||
| | :-------------------- | :--------------------- | :--------------------------------------- | | ||||
| | `example.com`         | Hosting `.well-known`  | This is the `server_name`                | | ||||
| | `matrix.example.com`  | Synapse server         | This is the `base_url`, can't be `CNAME` | | ||||
| | `element.example.com` | Webclient              |                                          | | ||||
| | `turn.example.com`    | TURN / Element Call    | Highly recommended                       | | ||||
| | `call.example.com`    | Element Call           | Optional                                 | | ||||
| | `livekit.example.com` | LiveKit SFU            | Optional, needed for Element Call        | | ||||
|  | @ -1,181 +0,0 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # TURN server | ||||
| 
 | ||||
| You need a TURN server to connect participants that are behind a NAT firewall. | ||||
| Because IPv6 doesn't really need TURN, and Chrome can get confused if it has | ||||
| to use TURN over IPv6, we'll stick to a strict IPv4-only configuration. | ||||
| 
 | ||||
| Also, because VoIP traffic is only UDP, we won't do TCP. | ||||
| 
 | ||||
| TURN-functionality can be offered by coturn and LiveKit alike: coturn is used | ||||
| for legacy calls (only one-on-one, supported in Element Android), whereas | ||||
| Element Call (supported by ElementX, Desktop and Web) uses LiveKit. | ||||
| 
 | ||||
| In our documentation we'll enable both, which is probably not the optimal | ||||
| solution, but at least it results in a system that supports old and new | ||||
| clients. | ||||
| 
 | ||||
| Here we'll describe coturn, the dedicated ICE/STUN/TURN server that needs to | ||||
| be configured in Synapse, [LiveKit](../element-call#livekit) has its own page. | ||||
| 
 | ||||
| # Installation | ||||
| 
 | ||||
| Installation is short: | ||||
| 
 | ||||
| ``` | ||||
| apt install coturn | ||||
| ``` | ||||
| 
 | ||||
| For sake of maintainability we'll move the only configuration file into its | ||||
| own directoy: | ||||
| 
 | ||||
| ``` | ||||
| mkdir /etc/coturn | ||||
| mv /etc/turnserver.conf /etc/coturn | ||||
| ``` | ||||
| 
 | ||||
| We need to tell systemd to start it with the configuration file on the new | ||||
| place. Edit the service file with: | ||||
| 
 | ||||
| ``` | ||||
| systemctl edit coturn | ||||
| ``` | ||||
| 
 | ||||
| Contrary to what the comment suggests, only the parts you add will override | ||||
| the content that's already there. We have to "clean" the `ExecStart` first, | ||||
| before we assign a new line to it, so this is the bit we add: | ||||
| 
 | ||||
| ``` | ||||
| [Service] | ||||
| ExecStart= | ||||
| ExecStart=/usr/bin/turnserver -c /etc/coturn/turnserver.conf --pidfile=/etc/coturn/run/turnserver.pid | ||||
| ``` | ||||
| 
 | ||||
| Create the directory `/etc/coturn/run` and chgrp it to `turnserver`, so that | ||||
| coturn can write its pid there: `/run/turnserver.pid` can't be written because | ||||
| coturn doesn't run as root. | ||||
| 
 | ||||
| This prepares us for the next step: configuring the whole thing. | ||||
| 
 | ||||
| 
 | ||||
| # DNS and certificate {#dnscert} | ||||
| 
 | ||||
| As stated before, we only use IPv4, so a CNAME to our machine that also does | ||||
| IPv6 is a bad idea. Fix a new entry in DNS for TURN only, we'll use | ||||
| `turn.example.com` here. | ||||
| 
 | ||||
| Make sure this entry only has an A record, no AAAA. | ||||
| 
 | ||||
| Get a certificate for this name: | ||||
| 
 | ||||
| ``` | ||||
| certbot certonly --nginx -d turn.example.com | ||||
| ``` | ||||
| 
 | ||||
| This assumes you've already setup and started nginx (see [nginx](../nginx)). | ||||
| 
 | ||||
| {#fixssl} | ||||
| The certificate files reside under `/etc/letsencrypt/live`, but coturn and | ||||
| LiveKit don't run as root, and can't read them. Therefore we create the directory | ||||
| `/etc/coturn/ssl` where we copy the files to. This script should be run after | ||||
| each certificate renewal: | ||||
| 
 | ||||
| ``` | ||||
| #!/bin/bash | ||||
| 
 | ||||
| # This script is hooked after a renewal of the certificate, so that the | ||||
| # certificate files are copied and chowned, and made readable by coturn: | ||||
| 
 | ||||
| cd /etc/coturn/ssl | ||||
| cp /etc/letsencrypt/live/turn.example.com/{fullchain,privkey}.pem . | ||||
| chown turnserver:turnserver *.pem | ||||
| 
 | ||||
| # Make sure you only start/restart the servers that you need! | ||||
| systemctl try-reload-or-restart coturn livekit-server | ||||
| 
 | ||||
| ``` | ||||
| 
 | ||||
| Run this automatically after every renewal by adding this line to | ||||
| `/etc/letsencrypt/renewal/turn.example.com.conf`: | ||||
| 
 | ||||
| ``` | ||||
| renew_hook = /etc/coturn/fixssl | ||||
| ``` | ||||
| 
 | ||||
| Yes, it's a bit primitive and could (should?) be polished. But for now: it | ||||
| works. This will copy and chown the certificate files and restart coturn | ||||
| and/or LiveKit, depending on if they're running or not. | ||||
| 
 | ||||
| 
 | ||||
| # Configuration {#configuration} | ||||
| 
 | ||||
| Synapse's documentation gives a reasonable [default | ||||
| config](https://element-hq.github.io/synapse/latest/setup/turn/coturn.html). | ||||
| 
 | ||||
| We'll need a shared secret that Synapse can use to control coturn, so let's | ||||
| create that first: | ||||
| 
 | ||||
| ``` | ||||
| pwgen -s 64 1 | ||||
| ``` | ||||
| 
 | ||||
| Now that we have this, we can configure our configuration file under | ||||
| `/etc/coturn/turnserver.conf`. | ||||
| 
 | ||||
| ``` | ||||
| # We don't use the default ports, because LiveKit uses those | ||||
| listening-port=3480 | ||||
| tls-listening-port=5351 | ||||
| 
 | ||||
| # We don't need more than 10000 connections: | ||||
| min-port=40000 | ||||
| max-port=49999 | ||||
| 
 | ||||
| use-auth-secret | ||||
| static-auth-secret=<previously created secret> | ||||
| 
 | ||||
| realm=turn.example.com | ||||
| user-quota=12 | ||||
| total-quota=1200 | ||||
| 
 | ||||
| # Of course: substitute correct IPv4 address: | ||||
| listening-ip=111.222.111.222 | ||||
| 
 | ||||
| # VoIP traffic is only UDP | ||||
| no-tcp-relay | ||||
| 
 | ||||
| # coturn doesn't run as root, so the certificate has | ||||
| # to be copied/chowned here. | ||||
| cert=/etc/coturn/ssl/fullchain.pem | ||||
| pkey=/etc/coturn/ssl/privkey.pem | ||||
| 
 | ||||
| denied-peer-ip=0.0.0.0-255.255.255.255 | ||||
| denied-peer-ip=127.0.0.0-0.255.255.255 | ||||
| denied-peer-ip=10.0.0.0-10.255.255.255 | ||||
| denied-peer-ip=172.16.0.0-172.31.255.255 | ||||
| denied-peer-ip=192.168.0.0-192.168.255.255 | ||||
| denied-peer-ip=100.64.0.0-100.127.255.255 | ||||
| denied-peer-ip=192.0.0.0-192.0.0.255 | ||||
| denied-peer-ip=169.254.0.0-169.254.255.255 | ||||
| denied-peer-ip=192.88.99.0-192.88.99.255 | ||||
| denied-peer-ip=198.18.0.0-198.19.255.255 | ||||
| denied-peer-ip=192.0.2.0-192.0.2.255 | ||||
| denied-peer-ip=198.51.100.0-198.51.100.255 | ||||
| denied-peer-ip=203.0.113.0-203.0.113.255 | ||||
| 
 | ||||
| # We do only IPv4 | ||||
| allocation-default-address-family="ipv4" | ||||
| 
 | ||||
| # No weak TLS | ||||
| no-tlsv1 | ||||
| no-tlsv1_1 | ||||
| ``` | ||||
| 
 | ||||
| All other options in the configuration file are either commented out, or | ||||
| defaults. | ||||
| 
 | ||||
| Make sure you've opened the correct ports in the [firewall](../firewall). | ||||
|  | @ -1,119 +0,0 @@ | |||
| # Coturn TURN SERVER configuration file | ||||
| 
 | ||||
| # Only IPv4, IPv6 can confuse some software | ||||
| listening-ip=111.222.111.222 | ||||
| 
 | ||||
| # Listening port for TURN (UDP and TCP): | ||||
| listening-port=3480 | ||||
| 
 | ||||
| # Listening port for TURN TLS (UDP and TCP): | ||||
| tls-listening-port=5351 | ||||
| 
 | ||||
| # Lower and upper bounds of the UDP relay endpoints: | ||||
| # (default values are 49152 and 65535) | ||||
| # | ||||
| min-port=40000 | ||||
| max-port=49999 | ||||
| 
 | ||||
| use-auth-secret | ||||
| static-auth-secret=<very secure password> | ||||
| 
 | ||||
| realm=turn.example.com | ||||
| 
 | ||||
| 
 | ||||
| # Per-user allocation quota. | ||||
| # default value is 0 (no quota, unlimited number of sessions per user). | ||||
| # This option can also be set through the database, for a particular realm. | ||||
| user-quota=12 | ||||
| 
 | ||||
| # Total allocation quota. | ||||
| # default value is 0 (no quota). | ||||
| # This option can also be set through the database, for a particular realm. | ||||
| total-quota=1200 | ||||
| 
 | ||||
| # Uncomment if no TCP relay endpoints are allowed. | ||||
| # By default TCP relay endpoints are enabled (like in RFC 6062). | ||||
| # | ||||
| no-tcp-relay | ||||
| 
 | ||||
| # Certificate file. | ||||
| # Use an absolute path or path relative to the | ||||
| # configuration file. | ||||
| # Use PEM file format. | ||||
| cert=/etc/coturn/ssl/fullchain.pem | ||||
| 
 | ||||
| # Private key file. | ||||
| # Use an absolute path or path relative to the | ||||
| # configuration file. | ||||
| # Use PEM file format. | ||||
| pkey=/etc/coturn/ssl/privkey.pem | ||||
| 
 | ||||
| # Option to redirect all log output into system log (syslog). | ||||
| # | ||||
| syslog | ||||
| 
 | ||||
| # Option to allow or ban specific ip addresses or ranges of ip addresses. | ||||
| # If an ip address is specified as both allowed and denied, then the ip address is | ||||
| # considered to be allowed. This is useful when you wish to ban a range of ip | ||||
| # addresses, except for a few specific ips within that range. | ||||
| # | ||||
| # This can be used when you do not want users of the turn server to be able to access | ||||
| # machines reachable by the turn server, but would otherwise be unreachable from the | ||||
| # internet (e.g. when the turn server is sitting behind a NAT) | ||||
| # | ||||
| denied-peer-ip=0.0.0.0-255.255.255.255 | ||||
| denied-peer-ip=127.0.0.0-0.255.255.255 | ||||
| denied-peer-ip=10.0.0.0-10.255.255.255 | ||||
| denied-peer-ip=172.16.0.0-172.31.255.255 | ||||
| denied-peer-ip=192.168.0.0-192.168.255.255 | ||||
| denied-peer-ip=100.64.0.0-100.127.255.255 | ||||
| denied-peer-ip=192.0.0.0-192.0.0.255 | ||||
| denied-peer-ip=169.254.0.0-169.254.255.255 | ||||
| denied-peer-ip=192.88.99.0-192.88.99.255 | ||||
| denied-peer-ip=198.18.0.0-198.19.255.255 | ||||
| denied-peer-ip=192.0.2.0-192.0.2.255 | ||||
| denied-peer-ip=198.51.100.0-198.51.100.255 | ||||
| denied-peer-ip=203.0.113.0-203.0.113.255 | ||||
| 
 | ||||
| 
 | ||||
| # TURN server allocates address family according TURN client requested address family. | ||||
| # If address family not requested explicitly by the client, then it falls back to this default. | ||||
| # The standard RFC explicitly define that this default must be IPv4,  | ||||
| # so use other option values with care!  | ||||
| # Possible values: "ipv4" or "ipv6" or "keep"  | ||||
| # "keep" sets the allocation default address family according to  | ||||
| # the TURN client allocation request connection address family. | ||||
| allocation-default-address-family="ipv4" | ||||
| 
 | ||||
| # Turn OFF the CLI support. | ||||
| # By default it is always ON. | ||||
| # See also options cli-ip and cli-port. | ||||
| # | ||||
| no-cli | ||||
| 
 | ||||
| # Do not allow an TLS/DTLS version of protocol | ||||
| # | ||||
| no-tlsv1 | ||||
| no-tlsv1_1 | ||||
| 
 | ||||
| # Disable RFC5780 (NAT behavior discovery). | ||||
| # | ||||
| # Strongly encouraged to use this option to decrease gain factor in STUN | ||||
| # binding responses. | ||||
| # | ||||
| no-rfc5780 | ||||
| 
 | ||||
| # Disable handling old STUN Binding requests and disable MAPPED-ADDRESS | ||||
| # attribute in binding response (use only the XOR-MAPPED-ADDRESS). | ||||
| # | ||||
| # Strongly encouraged to use this option to decrease gain factor in STUN | ||||
| # binding responses. | ||||
| # | ||||
| no-stun-backward-compatibility | ||||
| 
 | ||||
| # Only send RESPONSE-ORIGIN attribute in binding response if RFC5780 is enabled. | ||||
| # | ||||
| # Strongly encouraged to use this option to decrease gain factor in STUN | ||||
| # binding responses. | ||||
| # | ||||
| response-origin-only-with-rfc5780 | ||||
|  | @ -1,130 +0,0 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Draupnir | ||||
| 
 | ||||
| Draupnir is the way to do moderation. It can exchange banlists with other | ||||
| servers, and drop reports that people send into its moderation room so that | ||||
| moderators can act upon them. | ||||
| 
 | ||||
| Start by creating a room where moderators can give Draupnir commands. This | ||||
| room should not be encrypted. Then create a user for Draupnir, this user | ||||
| should ideally be an admin user. | ||||
| 
 | ||||
| Once you've created the user, log in as this user, maybe set an avatar, join | ||||
| the room you've created and then copy the access token. This token is used by | ||||
| the Draupnir software to login. | ||||
| 
 | ||||
| After that, close the window or client, but | ||||
| do not logout. If you logout, the token will be invalidated. | ||||
| 
 | ||||
| Make sure you have the right npm, Node.js, yarn and what-have-you ([see | ||||
| Draupnir's documentation](https://the-draupnir-project.github.io/draupnir-documentation/bot/setup_debian)) | ||||
| and prepare the software: | ||||
| 
 | ||||
| ``` | ||||
| mkdir /opt | ||||
| cd /opt | ||||
| git clone https://github.com/the-draupnir-project/Draupnir.git | ||||
| cd Draupnir | ||||
| git fetch --tags | ||||
| mkdir datastorage | ||||
| yarn global add corepack | ||||
| useradd -m draupnir | ||||
| chown -R draupnir:draupnir /opt/Draupnir | ||||
| ``` | ||||
| 
 | ||||
| Now, "compile" the stuff as user draupnir: | ||||
| 
 | ||||
| ``` | ||||
| sudo -u draupnir bash -c "yarn install" | ||||
| sudo -u draupnir bash -c "yarn build" | ||||
| ``` | ||||
| 
 | ||||
| When this is completed successfully, it's time to configure Draupnir. | ||||
| 
 | ||||
| 
 | ||||
| # Configuration | ||||
| 
 | ||||
| Under `config` you'll find the default configuration file, `default.yaml`. | ||||
| Copy it to `production.yaml` and change what you must. | ||||
| 
 | ||||
| | Option | Value | Meaning | | ||||
| | :---- | :---- | :---- | | ||||
| | `homeserverUrl` | `http://localhost:8008` | Where to communicate with Synapse when using network port| | ||||
| | `homeserverUrl` | `http://unix:/run/matrix-synapse/incoming_main.sock` | Where to communicate with Synapse when using UNIX sockets (see [Workers](../synapse/workers.md)) | | ||||
| | `rawHomeserverUrl` | `https://matrix.example.com` | Public URL of the homeserver | | ||||
| | `accessToken` | access token | Copy from login session or create in [Synapse Admin](../synapse-admin) | | ||||
| | `password` | password | Password for the account | | ||||
| | `dataPath` | `/opt/Draupnir/datastorage` | Storage | | ||||
| | `managementRoom` | room ID | Room where moderators command Draupnir | | ||||
| 
 | ||||
| This should give a working bot. | ||||
| 
 | ||||
| There are a few other bits that you probably want to change. Draupnir can | ||||
| direct reports to the management room, this is what you should change to | ||||
| activate that: | ||||
| 
 | ||||
| ``` | ||||
| web: | ||||
|   enabled: true | ||||
|   port: 8082 | ||||
|   address: ::1 | ||||
|   abuseReporting: | ||||
|     enabled: true | ||||
| 
 | ||||
| pollReports: true | ||||
| displayReports: true | ||||
| ``` | ||||
| 
 | ||||
| For this to work (for reports to reach Draupnir) you'll need to configure | ||||
| nginx to forward requests for reports to Draupnir: | ||||
| 
 | ||||
| ``` | ||||
| location ~ ^/_matrix/client/(r0|v3)/rooms/([^/]*)/report/(.*)$ { | ||||
|     # The r0 endpoint is deprecated but still used by many clients. | ||||
|     # As of this writing, the v3 endpoint is the up-to-date version. | ||||
|      | ||||
|     # Alias the regexps, to ensure that they're not rewritten. | ||||
|     set $room_id $2; | ||||
|     set $event_id $3; | ||||
|     proxy_pass http://[::1]:8082/api/1/report/$room_id/$event_id; | ||||
| } | ||||
| 
 | ||||
| # Reports that need to reach Synapse (not sure if this is used) | ||||
| location /_synapse/admin/v1/event_reports { | ||||
|     proxy_pass http://localhost:8008; | ||||
|     proxy_set_header X-Forwarded-For $remote_addr; | ||||
|     proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     proxy_set_header Host $host; | ||||
|     client_max_body_size 50M; | ||||
|     proxy_http_version 1.1; | ||||
| } | ||||
|      | ||||
| location ~ ^/_synapse/admin/v1/rooms/([^/]*)/context/(.*)$ { | ||||
|     set $room_id $2; | ||||
|     set $event_id $3; | ||||
|     proxy_pass http://localhost:8008/_synapse/admin/v1/rooms/$room_id/context/$event_id; | ||||
|     proxy_set_header X-Forwarded-For $remote_addr; | ||||
|     proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     proxy_set_header Host $host; | ||||
|     client_max_body_size 50M; | ||||
|     proxy_http_version 1.1; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| # Rate limiting | ||||
| 
 | ||||
| Normal users are rate limited, to prevent them from flooding the server. Draupnir | ||||
| is meant to stop those events, but if it is itself rate limited, it won't work | ||||
| all that well. | ||||
| 
 | ||||
| How rate limiting is configured server-wide is documented in [Synapse's | ||||
| documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=ratelimiting#ratelimiting). | ||||
| Overriding is, unfortunately, not something you can easily configure in the | ||||
| configuration files. You'll have to do that in the database itself: | ||||
| 
 | ||||
| ``` | ||||
| INSERT INTO ratelimit_override VALUES ('@draupnir:example.com', 0, 0); | ||||
| ``` | ||||
|  | @ -1,375 +0,0 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Overview | ||||
| 
 | ||||
| Element Call consists of a few parts, you don't have to host all of them | ||||
| yourself. In this document, we're going to host everything ourselves, so | ||||
| here's what you need. | ||||
| 
 | ||||
| * **lk-jwt**. This authenticates Synapse users to LiveKit. | ||||
| * **LiveKit**. This is the "SFU", which actually handles the audio and video, and does TURN. | ||||
| * **Element Call widget**. This is basically the webapplication, the user interface. | ||||
| 
 | ||||
| As mentioned in the [checklist](../checklist.md) you need to define these | ||||
| three entries in DNS and get certificates for them: | ||||
| 
 | ||||
| * `turn.example.com` | ||||
| * `livekit.example.com` | ||||
| * `call.example.com` | ||||
| 
 | ||||
| You may already have DNS and TLS for `turn.example.com`, as it is also used | ||||
| for [coturn](../coturn). | ||||
| 
 | ||||
| For more inspiration, check https://sspaeth.de/2024/11/sfu/ | ||||
| 
 | ||||
| 
 | ||||
| # LiveKit {#livekit} | ||||
| 
 | ||||
| The actual SFU, Selective Forwarding Unit, is LiveKit; this is the part that | ||||
| handles the audio and video feeds and also does TURN (this TURN-functionality | ||||
| does not support the legacy calls, you'll need [coturn](coturn) for that). | ||||
| 
 | ||||
| Downloading and installing is easy: download the [binary from | ||||
| Github](https://github.com/livekit/livekit/releases/download/v1.8.0/livekit_1.8.0_linux_amd64.tar.gz) | ||||
|  to /usr/local/bin, chown it to root:root and you're done. | ||||
| 
 | ||||
| The quickest way to do precisely that, is to run the script: | ||||
| 
 | ||||
| ``` | ||||
| curl -sSL https://get.livekit.io | bash | ||||
| ``` | ||||
| 
 | ||||
| You can do this as a normal user, it will use sudo to do its job. | ||||
| 
 | ||||
| While you're at it, you might consider installing the cli tool as well, you | ||||
| can use it -for example- to generate tokens so you can [test LiveKit's | ||||
| connectivity](https://livekit.io/connection-test): | ||||
| 
 | ||||
| ``` | ||||
| curl -sSL https://get.livekit.io/cli | bash | ||||
| ``` | ||||
| 
 | ||||
| Configuring LiveKit is [documented | ||||
| here](https://docs.livekit.io/home/self-hosting/deployment/). We're going to | ||||
| run LiveKit under authorization of user `turnserver`, the same user we use | ||||
| for [coturn](coturn). This user is created when installing coturn, so if you | ||||
| haven't installed that, you should create the user yourself: | ||||
| 
 | ||||
| ``` | ||||
| adduser --system turnserver | ||||
| ``` | ||||
| 
 | ||||
| ## Configure {#keysecret} | ||||
| 
 | ||||
| Start by creating a key and secret: | ||||
| 
 | ||||
| ``` | ||||
| livekit-server generate-keys | ||||
| ``` | ||||
| 
 | ||||
| This key and secret have to be fed to lk-jwt-service too, [see here](#jwtconfig). | ||||
| Create the directory for LiveKit's configuration: | ||||
| 
 | ||||
| ``` | ||||
| mkdir /etc/livekit | ||||
| chown root:turnserver /etc/livekit | ||||
| chmod 750 /etc/livekit | ||||
| ``` | ||||
| 
 | ||||
| Create a configuration file for livekit, `/etc/livekit/livekit.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| port: 7880 | ||||
| bind_addresses: | ||||
|     - ::1 | ||||
| rtc: | ||||
|     tcp_port: 7881 | ||||
|     port_range_start: 50000 | ||||
|     port_range_end: 60000 | ||||
|     use_external_ip: true | ||||
|     enable_loopback_candidate: false | ||||
| turn: | ||||
|     enabled: true | ||||
|     domain: livekit.example.com | ||||
|     cert_file: /etc/coturn/ssl/fullchain.pem | ||||
|     key_file: /etc/coturn/ssl/privkey.pem | ||||
|     tls_port: 5349 | ||||
|     udp_port: 3478 | ||||
|     external_tls: true | ||||
| keys: | ||||
|     # KEY: SECRET were generated by "livekit-server generate-keys" | ||||
|     <KEY>: <SECRET> | ||||
| ``` | ||||
| 
 | ||||
| Being a bit paranoid: make sure LiveKit can only read this file, not write it: | ||||
| 
 | ||||
| ``` | ||||
| chown root:turnserver /etc/livekit/livekit.yaml | ||||
| chmod 640 /etc/livekit/livekit.yaml | ||||
| ``` | ||||
| 
 | ||||
| Port `7880` is forwarded by nginx: authentication is also done there, and that | ||||
| bit has to be forwarded to `lk-jwt-service` on port `8080`. Therefore, we | ||||
| listen only on localhost. | ||||
| 
 | ||||
| The TURN ports are the normal, default ones. If you also use coturn, make sure | ||||
| it doesn't use the same ports as LiveKit. Also, make sure you open the correct | ||||
| ports in the [firewall](../firewall). | ||||
| 
 | ||||
| 
 | ||||
| ## TLS certificate | ||||
| 
 | ||||
| The TLS-certificate files are not in the usual place under | ||||
| `/etc/letsencrypt/live`, see [DNS and | ||||
| certificate](../coturn/README.md#dnscert) under coturn why that is. | ||||
| 
 | ||||
| As stated before, we use the same user as for coturn. Because this user does | ||||
| not have the permission to read private keys under `/etc/letsencrypt`, we copy | ||||
| those files to a place where it can read them. For coturn we copy them to | ||||
| `/etc/coturn/ssl`, and if you use coturn and have this directory, LiveKit can | ||||
| read them there too. | ||||
| 
 | ||||
| If you don't have coturn installed, you should create a directory under | ||||
| `/etc/livekit` and copy the files to there. Modify the `livekit.yaml` file and | ||||
| the [script to copy the files](../coturn/README.md#fixssl) to use that | ||||
| directory. Don't forget to update the `renew_hook` in Letsencrypt if you do. | ||||
| 
 | ||||
| The LiveKit API listens on localhost, IPv6, port 7880. Traffic to this port is | ||||
| forwarded from port 443 by nginx, which handles TLS, so it shouldn't be reachable | ||||
| from the outside world. | ||||
| 
 | ||||
| See [LiveKit's config documentation](https://github.com/livekit/livekit/blob/master/config-sample.yaml) | ||||
| for more options. | ||||
| 
 | ||||
| 
 | ||||
| ## Systemd | ||||
| 
 | ||||
| Now define a systemd servicefile, like this: | ||||
| 
 | ||||
| ``` | ||||
| [Unit] | ||||
| Description=LiveKit Server | ||||
| After=network.target | ||||
| Documentation=https://docs.livekit.io | ||||
| 
 | ||||
| [Service] | ||||
| User=turnserver | ||||
| Group=turnserver | ||||
| LimitNOFILE=500000 | ||||
| Restart=on-failure | ||||
| WorkingDirectory=/etc/livekit | ||||
| ExecStart=/usr/local/bin/livekit-server --config /etc/livekit/livekit.yaml | ||||
| 
 | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
| ``` | ||||
| 
 | ||||
| Enable and start it. | ||||
| 
 | ||||
| Clients don't know about LiveKit yet, you'll have to give them the information | ||||
| via the `.well-known/matrix/client`: add this bit to it to point them at the | ||||
| SFU: | ||||
| 
 | ||||
| ``` | ||||
| "org.matrix.msc4143.rtc_foci": [ | ||||
|       { | ||||
|         "type": "livekit", | ||||
|         "livekit_service_url": "https://livekit.example.com" | ||||
|       } | ||||
|   ] | ||||
| ``` | ||||
| 
 | ||||
| Make sure it is served as `application/json`, just like the other .well-known | ||||
| files. | ||||
| 
 | ||||
| 
 | ||||
| # lk-jwt-service {#lkjwt} | ||||
| 
 | ||||
| lk-jwt-service is a small Go program that handles authorization tokens for use with LiveKit. | ||||
| You'll need a Go compiler, but the one Debian provides is too old (at the time | ||||
| of writing this, at least), so we'll install the latest one manually. Check | ||||
| [the Go website](https://go.dev/dl/) to see which version is the latest, at | ||||
| the time of writing it's 1.23.3, so we'll install that: | ||||
| 
 | ||||
| ``` | ||||
| wget https://go.dev/dl/go1.23.3.linux-amd64.tar.gz | ||||
| tar xvfz go1.23.3.linux-amd64.tar.gz | ||||
| cd go/bin | ||||
| export PATH=`pwd`:$PATH | ||||
| cd | ||||
| ``` | ||||
| 
 | ||||
| This means you now have the latest Go compiler in your path, but it's not | ||||
| installed system-wide. If you want that, copy the whole `go` directory to | ||||
| `/usr/local` and add `/usr/local/go/bin` to everybody's $PATH. | ||||
| 
 | ||||
| Get the latest lk-jwt-service source code and compile it (preferably *NOT* as root): | ||||
| 
 | ||||
| ``` | ||||
| git clone https://github.com/element-hq/lk-jwt-service.git | ||||
| cd lk-jwt-service | ||||
| go build -o lk-jwt-service | ||||
| ``` | ||||
| 
 | ||||
| Now, compile: | ||||
| 
 | ||||
| ``` | ||||
| cd lk-jwt-service | ||||
| go build -o lk-jwt-service | ||||
| ``` | ||||
| 
 | ||||
| Copy and chown the binary to `/usr/local/sbin` (yes: as root): | ||||
| 
 | ||||
| ``` | ||||
| cp ~user/lk-jwt-service/lk-jwt-service /usr/local/sbin | ||||
| chown root:root /usr/local/sbin/lk-jwt-service | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ## Systemd | ||||
| 
 | ||||
| Create a service file for systemd, something like this: | ||||
| 
 | ||||
| ``` | ||||
| # This thing does authorization for Element Call | ||||
| 
 | ||||
| [Unit] | ||||
| Description=LiveKit JWT Service | ||||
| After=network.target | ||||
| 
 | ||||
| [Service] | ||||
| Restart=always | ||||
| User=www-data | ||||
| Group=www-data | ||||
| WorkingDirectory=/etc/lk-jwt-service | ||||
| EnvironmentFile=/etc/lk-jwt-service/config | ||||
| ExecStart=/usr/local/sbin/lk-jwt-service | ||||
| 
 | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
| ``` | ||||
| 
 | ||||
| ## Configuration {#jwtconfig} | ||||
| 
 | ||||
| We read the options from `/etc/lk-jwt-service/config`, | ||||
| which we make read-only for group `www-data` and non-accessible by anyone | ||||
| else. | ||||
| 
 | ||||
| ``` | ||||
| mkdir /etc/lk-jwt-service | ||||
| vi /etc/lk-jwt-service/config | ||||
| chown -R root:www-data /etc/lk-jwt-service | ||||
| chmod 750 /etc/lk-jwt-service | ||||
| ``` | ||||
| 
 | ||||
| This is what you should put into that config file, | ||||
| `/etc/lk-jwt-service/config`. The `LIVEKIT_SECRET` and `LIVEKIT_KEY` are the | ||||
| ones you created while [configuring LiveKit](#keysecret). | ||||
| 
 | ||||
| ``` | ||||
| LIVEKIT_URL=wss://livekit.example.com | ||||
| LIVEKIT_SECRET=xxx | ||||
| LIVEKIT_KEY=xxx | ||||
| LK_JWT_PORT=8080 | ||||
| ``` | ||||
| 
 | ||||
| Change the permission accordingly: | ||||
| 
 | ||||
| ``` | ||||
| chown root:www-data /etc/lk-jwt-service/config | ||||
| chmod 640 /etc/lk-jwt-service/config | ||||
| ``` | ||||
| 
 | ||||
| Now enable and start this thing: | ||||
| 
 | ||||
| ``` | ||||
| systemctl enable --now lk-jwt-service | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Element Call widget {#widget} | ||||
| 
 | ||||
| This is a Node.js thingy, so start by installing yarn. Unfortunately both npm | ||||
| and `yarnpkg` in Debian are antique, so we need to update them after installation. | ||||
| Install Node.js and upgrade everything. Do not do this as root, we'll only | ||||
| need to "compile" Element Call once. | ||||
| 
 | ||||
| See [the Node.js | ||||
| website](https://nodejs.org/en/download/package-manager/current) for | ||||
| instructions. | ||||
| 
 | ||||
| 
 | ||||
| ``` | ||||
| curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.0/install.sh | bash | ||||
| ``` | ||||
| 
 | ||||
| Exit and login again to set some environment variables (yes, the installation | ||||
| changes .bashrc). Then install and upgrade: | ||||
|   | ||||
| ``` | ||||
| nvm install 23 | ||||
| sudo apt install yarnpkg | ||||
| /usr/share/nodejs/yarn/bin/yarn set version stable | ||||
| /usr/share/nodejs/yarn/bin/yarn install | ||||
| ``` | ||||
| 
 | ||||
| Yes, this whole Node.js, yarn and npm thing is a mess. Better documentation | ||||
| could be written, but for now this will have to do. | ||||
| 
 | ||||
| Now clone the Element Call repository and "compile" stuff (again: not as | ||||
| root): | ||||
| 
 | ||||
| ``` | ||||
| git clone https://github.com/element-hq/element-call.git | ||||
| cd element-call | ||||
| /usr/share/nodejs/yarn/bin/yarn | ||||
| /usr/share/nodejs/yarn/bin/yarn build | ||||
| ``` | ||||
| 
 | ||||
| If it successfully compiles (warnings are more or less ok, errors aren't), you will | ||||
| find the whole shebang under "dist". Copy that to `/var/www/element-call` and point | ||||
| nginx to it ([see nginx](../nginx#callwidget)). | ||||
| 
 | ||||
| 
 | ||||
| ## Configuring | ||||
| 
 | ||||
| It needs a tiny bit of configuring. The default configuration under `config/config.sample.json` | ||||
| is a good place to start, copy it to `/etc/element-call` and change where | ||||
| necessary: | ||||
| 
 | ||||
| ``` | ||||
| { | ||||
|   "default_server_config": { | ||||
|       "m.homeserver": { | ||||
|           "base_url": "https://matrix.example.com", | ||||
|           "server_name": "example.com" | ||||
|       } | ||||
|   }, | ||||
| 
 | ||||
|   "livekit": { | ||||
|     "livekit_service_url": "https://livekit.example.com" | ||||
|   }, | ||||
| 
 | ||||
|   "features": { | ||||
|     "feature_use_device_session_member_events": true | ||||
|   }, | ||||
| 
 | ||||
|   "eula": "https://www.example.com/online-EULA.pdf" | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| Now tell the clients about this widget.  Create | ||||
| `.well-known/element/element.json`, which is opened by Element Web, Element Desktop | ||||
| and ElementX to find the Element Call widget. It should look like this: | ||||
| 
 | ||||
| ``` | ||||
| { | ||||
|     "call": { | ||||
|         "widget_url": "https://call.example.com" | ||||
|     } | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
|  | @ -1,6 +0,0 @@ | |||
| { | ||||
|     "call": | ||||
|     { | ||||
|         "widget_url": "https://call.example.com" | ||||
|     } | ||||
| } | ||||
|  | @ -1,70 +0,0 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Element-web | ||||
| 
 | ||||
| Element-web is the webinterface, Element in a browser. You'll find the source | ||||
| and [documentation on installing and | ||||
| configuring](https://github.com/element-hq/element-web/blob/develop/docs/install.md) | ||||
| on Github. | ||||
| 
 | ||||
| You should never run Element-web on the same FQDN as your Synapse-server, | ||||
| because of XSS problems. So start by defining a new FQDN for where you will | ||||
| publish Element-web, and get a certificate for that (don't forget to | ||||
| [automatically reload nginx after the certificate renewal](../nginx/README.md#certrenew)). | ||||
| 
 | ||||
| We'll use `element.example.com` here. | ||||
| 
 | ||||
| 
 | ||||
| # Installing on Debian {#debian} | ||||
| 
 | ||||
| Installing it on Debian is very easy indeed: | ||||
| 
 | ||||
| ``` | ||||
| wget -O /usr/share/keyrings/element-io-archive-keyring.gpg https://packages.element.io/debian/element-io-archive-keyring.gpg | ||||
| echo "deb [signed-by=/usr/share/keyrings/element-io-archive-keyring.gpg] https://packages.element.io/debian/ default main" | | ||||
|               tee /etc/apt/sources.list.d/element-io.list | ||||
| apt update | ||||
| apt install element-web | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Configuration {#configuration} | ||||
| 
 | ||||
| Configuring is done in `config.json`, which needs to go into `/etc/element-web` | ||||
| in a Debian install. See the [documentation on | ||||
| Github](https://github.com/element-hq/element-web/blob/develop/docs/config.md). | ||||
| 
 | ||||
| The most important thing to change is the `default_server_config`. Make sure | ||||
| it's something like this: | ||||
| 
 | ||||
| ``` | ||||
| "default_server_config": { | ||||
|     "m.homeserver": { | ||||
|         "base_url": "https://matrix.example.com", | ||||
|         "server_name": "example.com" | ||||
|     } | ||||
| }, | ||||
| ``` | ||||
| 
 | ||||
| Of course, substitute the correct domain and server name. | ||||
| 
 | ||||
| 
 | ||||
| # Browser notes {#browsernotes} | ||||
| 
 | ||||
| Element-web runs in the browser, on JavaScript. Yours truly found out that | ||||
| running [JShelter](https://jshelter.org/) throws a spanner in the works, so | ||||
| you'll have to disable it for the URL you publish Element-web. | ||||
| 
 | ||||
| Also, Element-web is rather dependent on the version of your browser, so make | ||||
| sure you keep yours up-to-date. Debian users, who run "Firefox ESR" should | ||||
| know support for that is on a best effort basis, you might want to consider | ||||
| using the "real" Firefox. [Debian packages are | ||||
| available](https://support.mozilla.org/en-US/kb/install-firefox-linux#w_install-firefox-deb-package-for-debian-based-distributions-recommended). | ||||
| 
 | ||||
| Element Web uses "workers", that are not installed in private windows. One | ||||
| thing that won't work in a private window, is downloading (i.e. displaying) | ||||
| images. If you don't see avatars and get "failed to download" messages, check | ||||
| if you're running Element Web in a private window. | ||||
|  | @ -1,25 +1,13 @@ | |||
| # Firewall | ||||
| 
 | ||||
| Several ports need to be opened in the firewall, this is a list of all ports | ||||
| that are needed by the components we describe in this document. | ||||
| This page is mostly a placeholder for now, but configuration of the firewall | ||||
| is -of course- very important. | ||||
| 
 | ||||
| Those for nginx are necessary for Synapse to work, the ones for coturn and | ||||
| LiveKit only need to be opened if you run those servers. | ||||
| First idea: the ports that need to be opened are: | ||||
| 
 | ||||
| 
 | ||||
| | Port(s) / range | IP version | Protocol | Application            | | ||||
| | :-------------: | :--------: | :------: | :--------------------- | | ||||
| | 80, 443         | IPv4/IPv6  | TCP      | nginx, reverse proxy   | | ||||
| | 8443            | IPv4/IPv6  | TCP      | nginx, federation      | | ||||
| | 3478            | IPv4       | UDP      | LiveKit TURN           | | ||||
| | 5349            | IPv4       | TCP      | LiveKit TURN TLS       | | ||||
| | 7881            | IPv4/IPv6  | TCP      | LiveKit RTC            | | ||||
| | 50000-60000     | IPv4/IPv6  | TCP/UDP  | LiveKit RTC            | | ||||
| | 3480            | IPv4       | TCP/UDP  | coturn TURN            | | ||||
| | 5351            | IPv4       | TCP/UDP  | coturn TURN TLS        | | ||||
| | 40000-49999     | IPv4       | TCP/UDP  | coturn RTC             | | ||||
| 
 | ||||
| 
 | ||||
| The ports necessary for TURN depend very much on the specific configuration of | ||||
| [coturn](../coturn#configuration) and/or [LiveKit](../element-call#livekit). | ||||
| | Port(s) / range | Protocol | Application | | ||||
| | :--: | :--: | :-- | | ||||
| | 80, 443 | TCP | Reverse proxy | | ||||
| | 8443 | TCP | Synapse, federation | | ||||
| 
 | ||||
|  |  | |||
|  | @ -25,48 +25,16 @@ easy: | |||
| apt install nginx python3-certbot-nginx | ||||
| ``` | ||||
| 
 | ||||
| Get your certificate for the base domain (which is probably not the machine on which | ||||
| we're going to run Synapse): | ||||
| Get your certificate: | ||||
| 
 | ||||
| ``` | ||||
| certbot certonly --nginx --agree-tos -m system@example.com --non-interactive -d example.com | ||||
| ``` | ||||
| 
 | ||||
| Get one for the machine on which we are going to run Synapse too: | ||||
| 
 | ||||
| ``` | ||||
| certbot certonly --nginx --agree-tos -m system@example.com --non-interactive -d matrix.example.com | ||||
| certbot certonly --nginx --agree-tos -m systeemmail@procolix.com --non-interactive -d matrixdev.procolix.com | ||||
| ``` | ||||
| 
 | ||||
| Substitute the correct e-mailaddress and FQDN, or course. | ||||
| 
 | ||||
| 
 | ||||
| ## Automatic renewal {#certrenew} | ||||
| 
 | ||||
| Certificates have a limited lifetime, and need to be updated every once in a | ||||
| while. This should be done automatically by Certbot, see if `systemctl | ||||
| list-timers` lists `certbot.timer`. | ||||
| 
 | ||||
| However, renewing the certificate means you'll have to restart the software | ||||
| that's using it. We have 2 or 3 pieces of software that use certificates: | ||||
| [coturn](../coturn) and/or [LiveKit](../element-call#livekit), and [nginx](../nginx). | ||||
| 
 | ||||
| Coturn/LiveKit are special with regards to the certificate, see their | ||||
| respective pages. For nginx it's pretty easy: tell Letsencrypt to restart it | ||||
| after a renewal. | ||||
| 
 | ||||
| You do this by adding this line to the `[renewalparams]` in | ||||
| `/etc/letsencrypt/renewal/<certificate name>.conf`: | ||||
| 
 | ||||
| ``` | ||||
| renew_hook = systemctl try-reload-or-restart nginx | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Configuration of domain name {#configdomain} | ||||
| 
 | ||||
| Let's start with the configuration on the webserver that runs on the domain | ||||
| name itself, in this case `example.com`. | ||||
| # Configuration | ||||
| 
 | ||||
| Almost all traffic should be encrypted, so a redirect from http to https seems | ||||
| like a good idea. | ||||
|  | @ -85,22 +53,23 @@ server { | |||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
| 
 | ||||
|     ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; | ||||
|     ssl_certificate /etc/letsencrypt/live/matrixdev.procolix.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/matrixdev.procolix.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
| 
 | ||||
|     server_name example.com; | ||||
|     server_name matrixdev.procolix.com; | ||||
| 
 | ||||
|     location /.well-known/matrix/client { | ||||
|        return 200 '{ | ||||
|           "m.homeserver": {"base_url": "https://matrix.example.com"}, | ||||
|           "m.homeserver": {"base_url": "https://vm02199.procolix.com"}, | ||||
|           "org.matrix.msc3575.proxy": {"url": "https://vm02199.procolix.com"} | ||||
|        }'; | ||||
|        default_type application/json; | ||||
|     } | ||||
| 
 | ||||
|     location /.well-known/matrix/server { | ||||
|        return 200 '{"m.server": "matrix.example.com"}'; | ||||
|        return 200 '{"m.server": "vm02199.procolix.com"}'; | ||||
|        default_type application/json; | ||||
|     } | ||||
| 
 | ||||
|  | @ -110,8 +79,8 @@ server { | |||
|       } | ||||
|     } | ||||
| 
 | ||||
|     access_log /var/log/nginx/example_com-access.log; | ||||
|     error_log /var/log/nginx/example_com-error.log; | ||||
|     access_log /var/log/nginx/matrixdev-access.log; | ||||
|     error_log /var/log/nginx/matrixdev-error.log; | ||||
| 
 | ||||
| } | ||||
| ``` | ||||
|  | @ -121,16 +90,9 @@ This defines a server that listens on both http and https. It hands out two | |||
| http is forwarded to https. | ||||
| 
 | ||||
| Be sure to substitute the correct values for `server_name`, `base_url` and the | ||||
|     certificate files (and [renew the certificate](#certrenew)). | ||||
| certificate files. | ||||
| 
 | ||||
| See this [full configuration example](domain.conf) with some extra stuff. | ||||
| 
 | ||||
| 
 | ||||
| # Configuration of the reverse proxy | ||||
| 
 | ||||
| For the actual proxy in front of Synapse, this is what you need: forward ports | ||||
| 443 and 8448 to Synapse, listening on localhost, and add a few headers so | ||||
| Synapse know's who's on the other side of the line. | ||||
| For the actual proxy in front of Synapse, this is what you need: | ||||
| 
 | ||||
| ``` | ||||
| server { | ||||
|  | @ -141,12 +103,12 @@ server { | |||
| 	listen 8448 ssl default_server; | ||||
| 	listen [::]:8448 ssl default_server; | ||||
| 
 | ||||
| 	ssl_certificate /etc/letsencrypt/live/matrix.example.com/fullchain.pem; | ||||
| 	ssl_certificate_key /etc/letsencrypt/live/matrix.example.com/privkey.pem; | ||||
| 	ssl_certificate /etc/letsencrypt/live/vm02199.procolix.com/fullchain.pem; | ||||
| 	ssl_certificate_key /etc/letsencrypt/live/vm02199.procolix.com/privkey.pem; | ||||
| 	include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
| 	ssl_dhparam /etc/ssl/dhparams.pem; | ||||
| 
 | ||||
| 	server_name matrix.example.com; | ||||
| 	server_name vm02199.procolix.com; | ||||
| 
 | ||||
| 	location ~ ^(/_matrix|/_synapse/client) { | ||||
| 		proxy_pass http://localhost:8008; | ||||
|  | @ -163,202 +125,6 @@ server { | |||
| Again, substitute the correct values. Don't forget to open the relevant ports | ||||
| in the firewall. Ports 80 and 443 may already be open, 8448 is probably not. | ||||
| 
 | ||||
| This is a very, very basic configuration; just enough to give us a working | ||||
| service. See this [complete example](revproxy.conf) which also includes | ||||
| [Draupnir](../draupnir) and a protected admin endpoint. | ||||
| 
 | ||||
| # Element Web | ||||
| 
 | ||||
| You can host the webclient on a different machine, but we'll run it on the | ||||
| same one in this documentation. You do need a different FQDN however, you | ||||
| can't host it under the same name as Synapse, such as: | ||||
| ``` | ||||
| https://matrix.example.com/element-web | ||||
| ``` | ||||
| So you'll need to create an entry in DNS and get a TLS-certificate for it (as | ||||
| mentioned in the [checklist](../checklist.md)). | ||||
| 
 | ||||
| Other than that, configuration is quite simple. We'll listen on both http and | ||||
| https, and redirect http to https: | ||||
| 
 | ||||
| ``` | ||||
| server { | ||||
|     listen 80; | ||||
|     listen [::]:80; | ||||
|     listen 443 ssl http2; | ||||
|     listen [::]:443 ssl http2; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/element.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/element.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name element.example.com; | ||||
|      | ||||
|     location / { | ||||
|         if ($scheme = http) { | ||||
|             return 301 https://$host$request_uri; | ||||
|         } | ||||
|         add_header X-Frame-Options SAMEORIGIN; | ||||
|         add_header X-Content-Type-Options nosniff; | ||||
|         add_header X-XSS-Protection "1; mode=block"; | ||||
|         add_header Content-Security-Policy "frame-ancestors 'self'"; | ||||
|     } | ||||
|      | ||||
|     root /usr/share/element-web; | ||||
|     index index.html; | ||||
|      | ||||
|     access_log /var/log/nginx/elementweb-access.log; | ||||
|     error_log /var/log/nginx/elementweb-error.log; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| This assumes Element Web is installed under `/usr/share/element-web`, as done | ||||
| by the Debian package provided by Element.io. | ||||
| 
 | ||||
| # Synapse-admin {#synapse-admin} | ||||
| 
 | ||||
| If you also [install Synapse-Admin](../synapse-admin), you'll want to create | ||||
| another vhost, something like this: | ||||
| 
 | ||||
| ``` | ||||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/admin.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/admin.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name admin.example.com; | ||||
|      | ||||
|     root /var/www/synapse-admin; | ||||
|      | ||||
|     access_log /var/log/nginx/admin-access.log; | ||||
|     error_log /var/log/nginx/admin-error.log; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| You'll need an SSL certificate for this, of course. But you'll also need to | ||||
| give it access to the `/_synapse/admin` endpoint in Synapse. | ||||
| 
 | ||||
| You don't want this endpoint to be available for just anybody on the Internet, | ||||
| so restrict access to the IP-addresses from which you expect to use | ||||
| Synapse-Admin. | ||||
| 
 | ||||
| In `/etc/nginx/sites-available/synapse` you want to add this bit: | ||||
| 
 | ||||
| ``` | ||||
| location ~ ^/_synapse/admin { | ||||
|     allow 127.0.0.1; | ||||
|     allow ::1; | ||||
|     allow 111.222.111.222; | ||||
|     allow dead:beef::/64; | ||||
|     deny all; | ||||
|      | ||||
|     proxy_pass http://localhost:8008; | ||||
|     proxy_set_header X-Forwarded-For $remote_addr; | ||||
|     proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     proxy_set_header Host $host; | ||||
|     client_max_body_size 50M; | ||||
|     proxy_http_version 1.1; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| This means access to `/_synapse/admin` is only allowed for the addresses | ||||
| mentioned, but will be forwarded to Synapse in exactly the same way as | ||||
| "normal" requests. | ||||
| 
 | ||||
| 
 | ||||
| # LiveKit {#livekit} | ||||
| 
 | ||||
| If you run an SFU for Element Call, you need a virtual host for LiveKit. Make | ||||
| sure you install, configure and run [Element Call LiveKit](../element-call#livekit). | ||||
| Then create a virtual host much like this: | ||||
| 
 | ||||
| ``` | ||||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/livekit.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/livekit.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name livekit.example.com; | ||||
|      | ||||
|     # This is lk-jwt-service | ||||
|     location ~ ^(/sfu/get|/healthz) { | ||||
|         proxy_pass http://[::1]:8080; | ||||
|         proxy_set_header Host $host; | ||||
|         proxy_set_header X-Forwarded-Server $host; | ||||
|         proxy_set_header X-Real-IP $remote_addr; | ||||
|         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | ||||
|         proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     } | ||||
|      | ||||
|     location / { | ||||
|         proxy_pass http://[::1]:7880; | ||||
|         proxy_set_header Connection "upgrade"; | ||||
|         proxy_set_header Upgrade $http_upgrade; | ||||
|          | ||||
|         proxy_set_header Host $host; | ||||
|         proxy_set_header X-Forwarded-Server $host; | ||||
|         proxy_set_header X-Real-IP $remote_addr; | ||||
|         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | ||||
|         proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     } | ||||
|      | ||||
|     access_log /var/log/nginx/livekit-access.log; | ||||
|     error_log /var/log/nginx/livekit-error.log; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Element Call widget {#callwidget} | ||||
| 
 | ||||
| If you self-host the [Element Call widget](../element-call#widget), this | ||||
| should be the configuration to publish that: | ||||
| 
 | ||||
| ``` | ||||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/call.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/call.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name call.example.com; | ||||
|      | ||||
|     root /var/www/element-call; | ||||
|      | ||||
|     location /assets { | ||||
|         add_header Cache-Control "public, immutable, max-age=31536000"; | ||||
|     } | ||||
|      | ||||
|     location /apple-app-site-association { | ||||
|         default_type application/json; | ||||
|     } | ||||
|      | ||||
|     location = /config.json { | ||||
|         alias public/config.json; | ||||
|         default_type application/json; | ||||
|     } | ||||
|      | ||||
|     location / { | ||||
|         try_files $uri /$uri /index.html; | ||||
|         add_header Cache-Control "public, max-age=30, stale-while-revalidate=30"; | ||||
|     } | ||||
|      | ||||
|     access_log /var/log/nginx/call-access.log; | ||||
|     error_log /var/log/nginx/call-error.log; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Firewall | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,34 +0,0 @@ | |||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/call.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/call.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name call.example.com; | ||||
|      | ||||
|     root /var/www/element-call; | ||||
|      | ||||
|     location /assets { | ||||
|         add_header Cache-Control "public, immutable, max-age=31536000"; | ||||
|     } | ||||
|      | ||||
|     location /apple-app-site-association { | ||||
|         default_type application/json; | ||||
|     } | ||||
|      | ||||
|     location = /config.json { | ||||
|         alias public/config.json; | ||||
|         default_type application/json; | ||||
|     } | ||||
|      | ||||
|     location / { | ||||
|         try_files $uri /$uri /index.html; | ||||
|         add_header Cache-Control "public, max-age=30, stale-while-revalidate=30"; | ||||
|     } | ||||
|      | ||||
|     access_log /var/log/nginx/call-access.log; | ||||
|     error_log /var/log/nginx/call-error.log; | ||||
| } | ||||
|  | @ -1,61 +0,0 @@ | |||
| server { | ||||
|     listen 80; | ||||
|     listen [::]:80; | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name example.com; | ||||
|      | ||||
|     location /.well-known/matrix/client { | ||||
|         return 200 '{ | ||||
|             "m.homeserver": {"base_url": "https://matrix.example.com"}, | ||||
|             "org.matrix.msc3575.proxy": {"url": "https://matrix.example.com"}, | ||||
|             "org.matrix.msc4143.rtc_foci":[ | ||||
|             	{"type": "livekit", | ||||
|             	"livekit_service_url": "https://livekit.example.com"} | ||||
|             ] | ||||
|         }'; | ||||
|         default_type application/json; | ||||
|         add_header 'Access-Control-Allow-Origin' '*'; | ||||
|     } | ||||
|      | ||||
|     location /.well-known/matrix/server { | ||||
|         return 200 '{"m.server": "matrix.example.com"}'; | ||||
|         default_type application/json; | ||||
|     } | ||||
| 
 | ||||
|     location /.well-known/matrix/support { | ||||
|         return 200 '{ "contacts": | ||||
|             [ | ||||
|                 { "email_address": "admin@example.com", | ||||
|                 "matrix_id": "@admin:example.com", | ||||
|                 "role": "m.role.admin" }, | ||||
|                 { "email_address": "security@example.com", | ||||
|                 "matrix_id": "@john:example.com", | ||||
|                 "role": "m.role.security" } | ||||
|             ], | ||||
|                 "support_page": "https://www.example.com/matrix-support" | ||||
|         }'; | ||||
|         default_type application/json; | ||||
|     } | ||||
| 
 | ||||
|      | ||||
|     location /.well-known/element/element.json { | ||||
|         return 200 '{"call": {"widget_url": "https://call.example.com"}}'; | ||||
|         default_type application/json; | ||||
|     } | ||||
|      | ||||
|     location / { | ||||
|         if ($scheme = http) { | ||||
|             return 301 https://$host$request_uri; | ||||
|         } | ||||
|     } | ||||
|      | ||||
|     access_log /var/log/nginx/example-access.log; | ||||
|     error_log /var/log/nginx/example-error.log; | ||||
| } | ||||
|  | @ -1,29 +0,0 @@ | |||
| server { | ||||
|     listen 80; | ||||
|     listen [::]:80; | ||||
|     listen 443 ssl http2; | ||||
|     listen [::]:443 ssl http2; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/element.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/element.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name element.example.com; | ||||
|      | ||||
|     location / { | ||||
|         if ($scheme = http) { | ||||
|             return 301 https://$host$request_uri; | ||||
|         } | ||||
|         add_header X-Frame-Options SAMEORIGIN; | ||||
|         add_header X-Content-Type-Options nosniff;  | ||||
|         add_header X-XSS-Protection "1; mode=block"; | ||||
|         add_header Content-Security-Policy "frame-ancestors 'self'"; | ||||
|     } | ||||
|      | ||||
|     root /usr/share/element-web; | ||||
|     index index.html; | ||||
|      | ||||
|     access_log /var/log/nginx/elementweb-access.log; | ||||
|     error_log /var/log/nginx/elementweb-error.log; | ||||
| } | ||||
|  | @ -1,37 +0,0 @@ | |||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/livekit.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/livekit.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name livekit.example.com; | ||||
|      | ||||
|     # This is lk-jwt-service | ||||
|     location ~ ^(/sfu/get|/healthz) { | ||||
|         proxy_pass http://[::1]:8080; | ||||
|         proxy_set_header Host $host; | ||||
|         proxy_set_header X-Forwarded-Server $host; | ||||
|         proxy_set_header X-Real-IP $remote_addr; | ||||
|         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | ||||
|         proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     } | ||||
|      | ||||
|     location / { | ||||
|         proxy_pass http://[::1]:7880; | ||||
|         proxy_set_header Connection "upgrade"; | ||||
|         proxy_set_header Upgrade $http_upgrade; | ||||
|         #add_header Access-Control-Allow-Origin "*" always; | ||||
|          | ||||
|         proxy_set_header Host $host; | ||||
|         proxy_set_header X-Forwarded-Server $host; | ||||
|         proxy_set_header X-Real-IP $remote_addr; | ||||
|         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | ||||
|         proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     } | ||||
|      | ||||
|     access_log /var/log/nginx/livekit-access.log; | ||||
|     error_log /var/log/nginx/livekit-error.log; | ||||
| } | ||||
|  | @ -1,85 +0,0 @@ | |||
| server { | ||||
| 	listen 443 ssl; | ||||
| 	listen [::]:443 ssl; | ||||
| 
 | ||||
| 	# For the federation port | ||||
| 	listen 8448 ssl; | ||||
| 	listen [::]:8448 ssl; | ||||
| 
 | ||||
| 	ssl_certificate /etc/letsencrypt/live/matrix.example.com/fullchain.pem; | ||||
| 	ssl_certificate_key /etc/letsencrypt/live/matrix.example.com/privkey.pem; | ||||
| 	include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
| 	ssl_dhparam /etc/ssl/dhparams.pem; | ||||
| 
 | ||||
| 	server_name matrix.example.com; | ||||
| 
 | ||||
| 	# Abuse reports get forwarded to Draupnir, listening on port 8082 | ||||
| 	location ~ ^/_matrix/client/(r0|v3)/rooms/([^/]*)/report/(.*)$ { | ||||
| 		# The r0 endpoint is deprecated but still used by many clients. | ||||
| 		# As of this writing, the v3 endpoint is the up-to-date version. | ||||
| 		 | ||||
| 		# Alias the regexps, to ensure that they're not rewritten. | ||||
| 		set $room_id $2; | ||||
| 		set $event_id $3; | ||||
| 		proxy_pass http://[::1]:8082/api/1/report/$room_id/$event_id; | ||||
| 	} | ||||
| 
 | ||||
| 	# Reports that need to reach Synapse (not really sure if this is used) | ||||
| 	location /_synapse/admin/v1/event_reports { | ||||
| 		proxy_pass http://localhost:8008; | ||||
| 		proxy_set_header X-Forwarded-For $remote_addr; | ||||
| 		proxy_set_header X-Forwarded-Proto $scheme; | ||||
| 		proxy_set_header Host $host; | ||||
| 		client_max_body_size 50M; | ||||
| 		proxy_http_version 1.1; | ||||
| 	} | ||||
| 	location ~ ^/_synapse/admin/v1/rooms/([^/]*)/context/(.*)$ { | ||||
| 		set $room_id $2; | ||||
| 		set $event_id $3; | ||||
| 		proxy_pass http://localhost:8008/_synapse/admin/v1/rooms/$room_id/context/$event_id; | ||||
| 		proxy_set_header X-Forwarded-For $remote_addr; | ||||
| 		proxy_set_header X-Forwarded-Proto $scheme; | ||||
| 		proxy_set_header Host $host; | ||||
| 		client_max_body_size 50M; | ||||
| 		proxy_http_version 1.1; | ||||
| 	} | ||||
| 
 | ||||
| 	# If you want the server version to be public: | ||||
| 	location ~ ^/_synapse/admin/v1/server_version$ { | ||||
| 		proxy_pass http://localhost:8008; | ||||
| 		proxy_set_header X-Forwarded-For $remote_addr; | ||||
| 		proxy_set_header X-Forwarded-Proto $scheme; | ||||
| 		proxy_set_header Host $host; | ||||
| 		client_max_body_size 50M; | ||||
| 		proxy_http_version 1.1; | ||||
| 	} | ||||
| 		 | ||||
| 	# The rest of the admin endpoint shouldn't be public | ||||
| 	location ~ ^/_synapse/admin { | ||||
| 		allow 127.0.0.1; | ||||
| 		allow ::1; | ||||
| 		allow 111.222.111.222; | ||||
| 		allow dead:beef::/48; | ||||
| 		deny all; | ||||
| 
 | ||||
| 		proxy_pass http://localhost:8008; | ||||
| 		proxy_set_header X-Forwarded-For $remote_addr; | ||||
| 		proxy_set_header X-Forwarded-Proto $scheme; | ||||
| 		proxy_set_header Host $host; | ||||
| 		client_max_body_size 50M; | ||||
| 		proxy_http_version 1.1; | ||||
| 	} | ||||
| 
 | ||||
| 	location ~ ^(/_matrix|/_synapse/client) { | ||||
| 		proxy_pass http://localhost:8008; | ||||
| 		proxy_set_header X-Forwarded-For $remote_addr; | ||||
| 		proxy_set_header X-Forwarded-Proto $scheme; | ||||
| 		proxy_set_header Host $host; | ||||
| 		client_max_body_size 50M; | ||||
| 		proxy_http_version 1.1; | ||||
| 	} | ||||
| 
 | ||||
| 	access_log /var/log/nginx/matrix-access.log; | ||||
| 	error_log /var/log/nginx/matrix-error.log; | ||||
| } | ||||
| 
 | ||||
|  | @ -1,16 +0,0 @@ | |||
| server { | ||||
|     listen 443 ssl; | ||||
|     listen [::]:443 ssl; | ||||
|      | ||||
|     ssl_certificate /etc/letsencrypt/live/admin.example.com/fullchain.pem; | ||||
|     ssl_certificate_key /etc/letsencrypt/live/admin.example.com/privkey.pem; | ||||
|     include /etc/letsencrypt/options-ssl-nginx.conf; | ||||
|     ssl_dhparam /etc/ssl/dhparams.pem; | ||||
|      | ||||
|     server_name admin.example.com; | ||||
|      | ||||
|     root /var/www/synapse-admin; | ||||
|      | ||||
|     access_log /var/log/nginx/admin-access.log; | ||||
|     error_log /var/log/nginx/admin-error.log; | ||||
| } | ||||
|  | @ -1,397 +0,0 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Reverse proxy for Synapse with workers | ||||
| 
 | ||||
| Changing nginx's configuration from a reverse proxy for a normal, monolithic | ||||
| Synapse to one for a Synapse that uses workers, is a big thing: quite a lot has to | ||||
| be changed. | ||||
| 
 | ||||
| As mentioned in [Synapse with workers](../../synapse/workers/README.md#synapse), | ||||
| we're changing the "backend" from network sockets to UNIX sockets. | ||||
| 
 | ||||
| Because we're going to have to forward a lot of specific requests to all kinds | ||||
| of workers, we'll split the configuration into a few bits: | ||||
| 
 | ||||
* all `proxy_pass` settings | ||||
| * all `location` definitions | ||||
| * maps that define variables | ||||
| * upstreams that point to the correct socket(s) with the correct settings | ||||
| * settings for private access | ||||
| * connection optimizations | ||||
| 
 | ||||
| Some of these go into `/etc/nginx/conf.d` because they are part of the | ||||
| configuration of nginx itself, others go into `/etc/nginx/snippets` because we | ||||
| need to include them several times in different places. | ||||
| 
 | ||||
| **Important consideration** | ||||
| 
 | ||||
| This part isn't a quick "put these files in place and you're done": a | ||||
| worker-based Synapse is tailor-made, there's no one-size-fits-all. This | ||||
| documentation gives hints and examples, but in the end it's you who has to | ||||
| decide what types of workers to use and how many, all depending on your | ||||
| specific use case and the available hardware. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| # Optimizations | ||||
| 
 | ||||
| In the quest for speed, we are going to tweak several settings in nginx. To | ||||
| keep things manageable, most of those tweaks go into separate configuration | ||||
| files that are either automatically included (those under `/etc/nginx/conf.d`) | ||||
| or explicitly where we need them (those under `/etc/nginx/snippets`). | ||||
| 
 | ||||
| Let's start with a few settings that affect nginx as a whole. Edit these | ||||
| options in `/etc/nginx/nginx.conf`: | ||||
| 
 | ||||
| ``` | ||||
| pcre_jit on; | ||||
| worker_rlimit_nofile 8192; | ||||
| worker_connections 4096; | ||||
| multi_accept off; | ||||
| gzip_comp_level 2; | ||||
| gzip_types application/javascript application/json application/x-javascript application/xml application/xml+rss image/svg+xml text/css text/javascript text/plain text/xml; | ||||
| gzip_min_length 1000; | ||||
| gzip_disable "MSIE [1-6]\."; | ||||
| ``` | ||||
| 
 | ||||
| We're going to use lots of regular expressions in our config, `pcre_jit on` | ||||
| speeds those up considerably. Workers get 8K open files, and we want 4096 | ||||
| workers instead of the default 768. Workers can only accept one connection, | ||||
which is (in almost every case) proxied, so we set `multi_accept off`. | ||||
| 
 | ||||
| We change `gzip_comp_level` from 6 to 2, we expand the list of content that is | ||||
| to be gzipped, and don't zip anything shorter than 1000 characters, instead of | ||||
| the default 20. MSIE can take a hike... | ||||
| 
 | ||||
These are tweaks for the connection, save this in `/etc/nginx/conf.d/conn_optimize.conf`. | ||||
| 
 | ||||
| ``` | ||||
| client_body_buffer_size 32m; | ||||
| client_header_buffer_size 32k; | ||||
| client_max_body_size 1g; | ||||
| http2_max_concurrent_streams 128; | ||||
| keepalive_timeout 65; | ||||
| keepalive_requests 100; | ||||
| large_client_header_buffers 4 16k; | ||||
| server_names_hash_bucket_size 128; | ||||
| tcp_nodelay on; | ||||
| server_tokens off; | ||||
| ``` | ||||
| 
 | ||||
| We set a few proxy settings that we use in proxy_forwards other than to our | ||||
| workers, save this to `conf.d/proxy_optimize.conf`: | ||||
| 
 | ||||
| ``` | ||||
| proxy_buffer_size 128k; | ||||
| proxy_buffers 4 256k; | ||||
| proxy_busy_buffers_size 256k; | ||||
| ``` | ||||
| 
 | ||||
For every `proxy_pass` to our workers, we want to configure several settings, | ||||
| and because we don't want to include the same list of settings every time, we put | ||||
| all of them in one snippet of code, that we can include every time we need it. | ||||
| 
 | ||||
| Create `/etc/nginx/snippets/proxy.conf` and put this in it: | ||||
| 
 | ||||
| ``` | ||||
| proxy_connect_timeout 2s; | ||||
| proxy_buffering off; | ||||
| proxy_http_version 1.1; | ||||
| proxy_read_timeout 3600s; | ||||
| proxy_redirect off; | ||||
| proxy_send_timeout 120s; | ||||
| proxy_socket_keepalive on; | ||||
| proxy_ssl_verify off; | ||||
| 
 | ||||
| proxy_set_header Accept-Encoding ""; | ||||
| proxy_set_header Host $host; | ||||
| proxy_set_header X-Forwarded-For $remote_addr; | ||||
| proxy_set_header X-Forwarded-Proto $scheme; | ||||
| proxy_set_header Connection $connection_upgrade; | ||||
| proxy_set_header Upgrade $http_upgrade; | ||||
| 
 | ||||
| client_max_body_size 50M; | ||||
| ``` | ||||
| 
 | ||||
Every time we use a `proxy_pass`, we include this snippet. There are 2 more | ||||
| things we might set: trusted locations that can use the admin endpoints, and a | ||||
| dedicated DNS-recursor. We include the `snippets/private.conf` in the | ||||
| forwards to admin endpoints, so that not the entire Internet can play with it. | ||||
| The dedicated nameserver is something you really want, because synchronising a | ||||
| large room can easily result in 100.000+ DNS requests. You'll hit flood | ||||
| protection on most servers if you do that. | ||||
| 
 | ||||
| List the addresses from which you want to allow admin access in | ||||
| `snippets/private.conf`: | ||||
| 
 | ||||
| ``` | ||||
| allow 127.0.0.1; | ||||
| allow ::1; | ||||
| allow 12.23.45.78; | ||||
| allow 87.65.43.21; | ||||
| allow dead:beef::/48; | ||||
| allow 2a10:1234:abcd::1; | ||||
| deny all; | ||||
| satisfy all; | ||||
| ``` | ||||
| 
 | ||||
Of course, substitute these random addresses for the ones you trust. The | ||||
| dedicated nameserver (if you have one, which is strongly recommended) should | ||||
| be configured in `conf.d/resolver.conf`: | ||||
| 
 | ||||
| ``` | ||||
| resolver [::1] 127.0.0.1 valid=60; | ||||
| resolver_timeout 10s; | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Maps {#maps} | ||||
| 
 | ||||
| A map sets a variable based on, usually, another variable. One case we use this | ||||
| is in determining the type of sync a client is doing. A normal sync, simply | ||||
| updating an existing session, is a rather lightweight operation. An initial sync, | ||||
| meaning a full sync because the session is brand new, is not so lightweight. | ||||
| 
 | ||||
| A normal sync can be recognised by the `since` bit in the request: it tells | ||||
| the server when its last sync was. If there is no `since`, we're dealing with | ||||
| an initial sync. | ||||
| 
 | ||||
| We want to forward requests for normal syncs to the `normal_sync` workers, and | ||||
| the initial syncs to the `initial_sync` workers. | ||||
| 
 | ||||
| We decide to which type of worker to forward the sync request to by looking at | ||||
| the presence or absence of `since`: if it's there, it's a normal sync and we | ||||
| set the variable `$sync` to `normal_sync`. If it's not there, we set `$sync` to | ||||
| `initial_sync`. The content of `since` is irrelevant for nginx. | ||||
| 
 | ||||
| This is what the map looks like: | ||||
| 
 | ||||
| ``` | ||||
| map $arg_since $sync { | ||||
|     default normal_sync; | ||||
|     '' initial_sync; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| We evaluate `$arg_since` to set `$sync`: `$arg_since` is nginx's variable `$arg_` | ||||
| followed by `since`, the argument we want. See [the index of | ||||
| variables in nginx](https://nginx.org/en/docs/varindex.html) for more | ||||
| variables we can use in nginx. | ||||
| 
 | ||||
| By default we set `$sync` to `normal_sync`, unless the argument `since` is | ||||
| empty (absent); then we set it to `initial_sync`. | ||||
| 
 | ||||
| After this mapping, we forward the request to the correct worker like this: | ||||
| 
 | ||||
| ``` | ||||
| proxy_pass http://$sync; | ||||
| ``` | ||||
| 
 | ||||
| See a complete example of maps in the file [maps.conf](maps.conf). | ||||
| 
 | ||||
| 
 | ||||
| # Upstreams | ||||
| 
 | ||||
| In our configuration, nginx is not only a reverse proxy, it's also a load balancer. | ||||
| Just like what `haproxy` does, it can forward requests to "servers" behind it. | ||||
| Such a server is the inbound UNIX socket of a worker, and there can be several | ||||
| of them in one group. | ||||
| 
 | ||||
| Let's start with a simple one, the `login` worker, that handles the login | ||||
| process for clients. There's only one worker, so only one socket: | ||||
| 
 | ||||
| ``` | ||||
| upstream login { | ||||
|     server unix:/run/matrix-synapse/inbound_login.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
After this definition, we can forward traffic to `login`. What traffic to | ||||
| forward is decided in the `location` statements, see further. | ||||
| 
 | ||||
| ## Synchronisation | ||||
| 
 | ||||
| A more complex example are the sync workers. Under [Maps](#Maps) we split sync | ||||
| requests into two different types; those different types are handled by | ||||
| different worker pools. In our case we have 2 workers for the initial_sync | ||||
| requests, and 3 for the normal ones: | ||||
| 
 | ||||
| ``` | ||||
| upstream initial_sync { | ||||
|     hash $mxid_localpart consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_initial_sync1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_initial_sync2.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| upstream normal_sync { | ||||
|     hash $mxid_localpart consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync3.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| The `hash` bit is to make sure that request from one user are consistently | ||||
| forwarded to the same worker. We filled the variable `$mxid_localpart` in the | ||||
| maps. | ||||
| 
 | ||||
| ## Federation | ||||
| 
 | ||||
| Something similar goes for the federation workers. Some requests need to go | ||||
to the same worker as all the other requests from the same IP-address, others | ||||
| can go to any of these workers. | ||||
| 
 | ||||
| We define two upstreams with the same workers, only with different names and | ||||
| the explicit IP-address ordering for one: | ||||
| 
 | ||||
| ``` | ||||
| upstream incoming_federation { | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| upstream federation_requests { | ||||
|     hash $remote_addr consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| Same workers, different handling. See how we forward requests in the next | ||||
| paragraph. | ||||
| 
 | ||||
| See [upstreams.conf](upstreams.conf) for a complete example. | ||||
| 
 | ||||
| 
 | ||||
| # Locations | ||||
| 
 | ||||
| Now that we have defined the workers and/or worker pools, we have to forward | ||||
| the right traffic to the right workers. The Synapse documentation about | ||||
| [available worker | ||||
| types](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications) | ||||
| lists which endpoints a specific worker type can handle.  | ||||
| 
 | ||||
| ## Login | ||||
| 
 | ||||
| Let's forward login requests to our login worker. The [documentation for the | ||||
| generic_worker](https://element-hq.github.io/synapse/latest/workers.html#synapseappgeneric_worker) | ||||
| says these endpoints are for registration and login: | ||||
| 
 | ||||
| ``` | ||||
| # Registration/login requests | ||||
| ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ | ||||
| ^/_matrix/client/(r0|v3|unstable)/register$ | ||||
| ^/_matrix/client/(r0|v3|unstable)/register/available$ | ||||
| ^/_matrix/client/v1/register/m.login.registration_token/validity$ | ||||
| ^/_matrix/client/(r0|v3|unstable)/password_policy$ | ||||
| ``` | ||||
| 
 | ||||
| We forward that to our worker with this `location` definition, using the | ||||
| `proxy_forward` settings we defined earlier: | ||||
| 
 | ||||
| ``` | ||||
| location ~ ^(/_matrix/client/(api/v1|r0|v3|unstable)/login|/_matrix/client/(r0|v3|unstable)/register|/_matrix/client/(r0|v3|unstable)/register/available|/_matrix/client/v1/register/m.login.registration_token/validity|/_matrix/client/(r0|v3|unstable)/password_policy)$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://login; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| ## Synchronisation | ||||
| 
 | ||||
| The docs say that the `generic_worker` can handle these requests for synchronisation | ||||
| requests: | ||||
| 
 | ||||
| ``` | ||||
| # Sync requests | ||||
| ^/_matrix/client/(r0|v3)/sync$ | ||||
| ^/_matrix/client/(api/v1|r0|v3)/events$ | ||||
| ^/_matrix/client/(api/v1|r0|v3)/initialSync$ | ||||
| ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ | ||||
| ``` | ||||
| 
 | ||||
| We forward those to our 2 worker pools making sure the heavy initial syncs go | ||||
| to the `initial_sync` pool, and the normal ones to `normal_sync`. We use the  | ||||
| variable `$sync` for that, which we defined in maps.conf. | ||||
| 
 | ||||
| ``` | ||||
| # Normal/initial sync | ||||
| location ~ ^/_matrix/client/(r0|v3)/sync$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://$sync; | ||||
| } | ||||
| 
 | ||||
| # Normal sync | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://normal_sync; | ||||
| } | ||||
| 
 | ||||
| # Initial sync | ||||
| location ~ ^(/_matrix/client/(api/v1|r0|v3)/initialSync|/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync)$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://initial_sync; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| ## Media | ||||
| 
 | ||||
| The media worker is slightly different: some parts are public, but a few bits | ||||
| are admin stuff. We split those, and limit the admin endpoints to the trusted | ||||
| addresses we defined earlier: | ||||
| 
 | ||||
| ``` | ||||
| # Media, public | ||||
| location ~* ^(/_matrix/((client|federation)/[^/]+/)media/|/_matrix/media/v3/upload/) { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://media; | ||||
| } | ||||
| 
 | ||||
| # Media, admin | ||||
| location ~ ^/_synapse/admin/v1/(purge_)?(media(_cache)?|room|user|quarantine_media|users)/[\s\S]+|media$ { | ||||
|     include snippets/private.conf; | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://media; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| # Federation | ||||
| 
 | ||||
| Federation is done by two types of workers: one pool for requests from our | ||||
| server to the rest of the world, and one pool for everything coming in from the | ||||
| outside world. Only the latter is relevant for nginx. | ||||
| 
 | ||||
| The documentation mentions two different types of federation: | ||||
| * Federation requests | ||||
| * Inbound federation transaction request | ||||
| 
 | ||||
| The second is special, in that requests for that specific endpoint must be | ||||
| balanced by IP-address. The "normal" federation requests can be sent to any | ||||
| worker. We're sending all these requests to the same workers, but we make sure | ||||
| to always send requests from 1 IP-address to the same worker: | ||||
| 
 | ||||
| ``` | ||||
| # Federation readers | ||||
| location ~ ^(/_matrix/federation/v1/event/|/_matrix/federation/v1/state/|/_matrix/federation/v1/state_ids/|/_matrix/federation/v1/backfill/|/_matrix/federation/v1/get_missing_events/|/_matrix/federation/v1/publicRooms|/_matrix/federation/v1/query/|/_matrix/federation/v1/make_join/|/_matrix/federation/v1/make_leave/|/_matrix/federation/(v1|v2)/send_join/|/_matrix/federation/(v1|v2)/send_leave/|/_matrix/federation/v1/make_knock/|/_matrix/federation/v1/send_knock/|/_matrix/federation/(v1|v2)/invite/|/_matrix/federation/v1/event_auth/|/_matrix/federation/v1/timestamp_to_event/|/_matrix/federation/v1/exchange_third_party_invite/|/_matrix/federation/v1/user/devices/|/_matrix/key/v2/query|/_matrix/federation/v1/hierarchy/) { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://incoming_federation; | ||||
| } | ||||
| # Inbound federation transactions | ||||
| location ~ ^/_matrix/federation/v1/send/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://federation_requests; | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
|  | @ -1,13 +0,0 @@ | |||
| # These settings optimize the connection handling. Store this file under /etc/nginx/conf.d, because | ||||
| # it should be loaded by default. | ||||
| 
 | ||||
| client_body_buffer_size 32m; | ||||
| client_header_buffer_size 32k; | ||||
| client_max_body_size 1g; | ||||
| http2_max_concurrent_streams 128; | ||||
| keepalive_timeout 65; | ||||
| keepalive_requests 100; | ||||
| large_client_header_buffers 4 16k; | ||||
| server_names_hash_bucket_size 128; | ||||
| tcp_nodelay on; | ||||
| server_tokens off; | ||||
|  | @ -1,111 +0,0 @@ | |||
| # This file describes the forwarding of (almost) every endpoint to a worker or pool of | ||||
| # workers. This file should go in /etc/nginx/snippets, because we need to load it once, on | ||||
| # the right place in our site-definition. | ||||
| 
 | ||||
| # Account-data | ||||
| location ~ ^(/_matrix/client/(r0|v3|unstable)/.*/tags|/_matrix/client/(r0|v3|unstable)/.*/account_data) { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://account_data; | ||||
| } | ||||
| 
 | ||||
| # Typing | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://typing; | ||||
| } | ||||
| 
 | ||||
| # Receipts | ||||
| location ~ ^(/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt|/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers) { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://receipts; | ||||
| } | ||||
| 
 | ||||
| # Presence | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://presence; | ||||
| } | ||||
| 
 | ||||
| # To device | ||||
| location ~ ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://todevice; | ||||
| } | ||||
| 
 | ||||
| # Push rules | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://push_rules; | ||||
| } | ||||
| 
 | ||||
| # Userdir | ||||
| location ~ ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://userdir; | ||||
| } | ||||
| 
 | ||||
| # Media, users1 | ||||
| location ~* ^/_matrix/((client|federation)/[^/]+/)media/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://media; | ||||
| } | ||||
| # Media, users2 | ||||
| location ~* ^/_matrix/media/v3/upload { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://media; | ||||
| } | ||||
| 
 | ||||
| # Media, admin | ||||
| location ~ ^/_synapse/admin/v1/(purge_)?(media(_cache)?|room|user|quarantine_media|users)/[\s\S]+|media$ { | ||||
|     include snippets/private.conf; | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://media; | ||||
| } | ||||
| 
 | ||||
| # Login | ||||
| location ~ ^(/_matrix/client/(api/v1|r0|v3|unstable)/login|/_matrix/client/(r0|v3|unstable)/register|/_matrix/client/(r0|v3|unstable)/register/available|/_matrix/client/v1/register/m.login.registration_token/validity|/_matrix/client/(r0|v3|unstable)/password_policy)$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://login; | ||||
| } | ||||
| 
 | ||||
| # Normal/initial sync: | ||||
| # To which upstream to pass the request depends on the map "$sync" | ||||
| location ~ ^/_matrix/client/(r0|v3)/sync$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://$sync; | ||||
| } | ||||
| # Normal sync: | ||||
| # These endpoints are used for normal syncs | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://normal_sync; | ||||
| } | ||||
| # Initial sync: | ||||
| # These endpoints are used for initial syncs | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://initial_sync; | ||||
| } | ||||
| location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://initial_sync; | ||||
| } | ||||
| 
 | ||||
| # Federation | ||||
| # All the "normal" federation stuff: | ||||
| location ~ ^(/_matrix/federation/v1/event/|/_matrix/federation/v1/state/|/_matrix/federation/v1/state_ids/|/_matrix/federation/v1/backfill/|/_matrix/federation/v1/get_missing_events/|/_matrix/federation/v1/publicRooms|/_matrix/federation/v1/query/|/_matrix/federation/v1/make_join/|/_matrix/federation/v1/make_leave/|/_matrix/federation/(v1|v2)/send_join/|/_matrix/federation/(v1|v2)/send_leave/|/_matrix/federation/v1/make_knock/|/_matrix/federation/v1/send_knock/|/_matrix/federation/(v1|v2)/invite/|/_matrix/federation/v1/event_auth/|/_matrix/federation/v1/timestamp_to_event/|/_matrix/federation/v1/exchange_third_party_invite/|/_matrix/federation/v1/user/devices/|/_matrix/key/v2/query|/_matrix/federation/v1/hierarchy/) { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://incoming_federation; | ||||
| } | ||||
| # Inbound federation transactions: | ||||
| location ~ ^/_matrix/federation/v1/send/ { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://federation_requests; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| # Main thread for all the rest | ||||
| location / { | ||||
|     include snippets/proxy.conf; | ||||
|     proxy_pass http://inbound_main; | ||||
| } | ||||
| 
 | ||||
|  | @ -1,55 +0,0 @@ | |||
| # These maps set all kinds of variables we can use later in our configuration. This file | ||||
| # should be stored under /etc/nginx/conf.d so that it is loaded whenever nginx starts. | ||||
| 
 | ||||
| # List of allowed origins, can only send one. | ||||
| map $http_origin $allow_origin {  | ||||
|         ~^https?://element.example.com$ $http_origin; | ||||
|         ~^https?://call.example.com$ $http_origin; | ||||
|         ~^https?://someserver.example.com$ $http_origin; | ||||
|         # NGINX won't set empty string headers, so if no match, header is unset. | ||||
|         default ""; | ||||
| } | ||||
| 
 | ||||
| # Client username from MXID | ||||
| # NOTE(review): this map duplicates the $http_authorization -> $mxid_localpart map | ||||
| # defined further below in this file; nginx fails on duplicate variable maps, so | ||||
| # only one of the two definitions should be kept. | ||||
| map $http_authorization $mxid_localpart { | ||||
|   default                           $http_authorization; | ||||
|   "~Bearer syt_(?<username>.*?)_.*" $username; | ||||
|   ""                                $accesstoken_from_urlparam; | ||||
| } | ||||
| 
 | ||||
| # Whether to upgrade HTTP connection | ||||
| map $http_upgrade $connection_upgrade { | ||||
|   default upgrade; | ||||
|   '' close; | ||||
| } | ||||
| 
 | ||||
| # Extract room name from URI | ||||
| map $request_uri $room_name { | ||||
|   default "not_room"; | ||||
|   "~^/_matrix/(client|federation)/.*?(?:%21|!)(?<room>[\s\S]+)(?::|%3A)(?<domain>[A-Za-z0-9.\-]+)" "!$room:$domain"; | ||||
| } | ||||
| 
 | ||||
| # Choose sync worker based on the existence of "since" query parameter | ||||
| map $arg_since $sync { | ||||
|     default normal_sync; | ||||
|     '' initial_sync; | ||||
| } | ||||
| 
 | ||||
| # Extract username from access token passed as URL parameter | ||||
| map $arg_access_token $accesstoken_from_urlparam { | ||||
|     # Defaults to just passing back the whole accesstoken | ||||
|     default   $arg_access_token; | ||||
|     # Try to extract username part from accesstoken URL parameter | ||||
|     "~syt_(?<username>.*?)_.*"           $username; | ||||
| } | ||||
| 
 | ||||
| # Extract username from access token passed as authorization header | ||||
| map $http_authorization $mxid_localpart { | ||||
|     # Defaults to just passing back the whole accesstoken | ||||
|     default                              $http_authorization; | ||||
|     # Try to extract username part from accesstoken header | ||||
|     "~Bearer syt_(?<username>.*?)_.*"    $username; | ||||
|     # if no authorization-header exist, try mapper for URL parameter "access_token" | ||||
|     ""                                   $accesstoken_from_urlparam; | ||||
| } | ||||
| 
 | ||||
|  | @ -1,13 +0,0 @@ | |||
| # This file defines the "safe" IP addresses that are allowed to use the admin endpoints | ||||
| # of our installation. Store this file under /etc/nginx/snippets, so you can load it on | ||||
| # demand for the bits you want/need to protect. | ||||
| 
 | ||||
| allow 127.0.0.1; | ||||
| allow ::1; | ||||
| allow 12.23.45.78; | ||||
| allow 87.65.43.21; | ||||
| allow dead:beef::/48; | ||||
| allow 2a10:1234:abcd::1; | ||||
| deny all; | ||||
| satisfy all; | ||||
| 
 | ||||
|  | @ -1,8 +0,0 @@ | |||
| # These are a few proxy settings that should be default. These are not used in the proxy_forward to | ||||
| # our workers, we don't want buffering there. Store this file under /etc/nginx/conf.d because it contains | ||||
| # defaults. | ||||
| 
 | ||||
| proxy_buffer_size 128k; | ||||
| proxy_buffers 4 256k; | ||||
| proxy_busy_buffers_size 256k; | ||||
| 
 | ||||
|  | @ -1,20 +0,0 @@ | |||
| # Settings that we want for every proxy_forward to our workers. This file should live | ||||
| # under /etc/nginx/snippets, because it should not be loaded automatically but on demand. | ||||
| 
 | ||||
| proxy_connect_timeout 2s; | ||||
| proxy_buffering off; | ||||
| proxy_http_version 1.1; | ||||
| proxy_read_timeout 3600s; | ||||
| proxy_redirect off; | ||||
| proxy_send_timeout 120s; | ||||
| proxy_socket_keepalive on; | ||||
| proxy_ssl_verify off; | ||||
| 
 | ||||
| proxy_set_header Accept-Encoding ""; | ||||
| proxy_set_header Host $host; | ||||
| proxy_set_header X-Forwarded-For $remote_addr; | ||||
| proxy_set_header X-Forwarded-Proto $scheme; | ||||
| proxy_set_header Connection $connection_upgrade; | ||||
| proxy_set_header Upgrade $http_upgrade; | ||||
| 
 | ||||
| client_max_body_size 50M; | ||||
|  | @ -1,116 +0,0 @@ | |||
| # Stream workers first, they are special. The documentation says: | ||||
| # "each stream can only have a single writer" | ||||
| 
 | ||||
| # Account-data | ||||
| upstream account_data { | ||||
|     server unix:/run/matrix-synapse/inbound_accountdata.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Userdir | ||||
| upstream userdir { | ||||
|     server unix:/run/matrix-synapse/inbound_userdir.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Typing | ||||
| upstream typing { | ||||
|     server unix:/run/matrix-synapse/inbound_typing.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # To device | ||||
| upstream todevice { | ||||
|     server unix:/run/matrix-synapse/inbound_todevice.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Receipts | ||||
| upstream receipts { | ||||
|     server unix:/run/matrix-synapse/inbound_receipts.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Presence | ||||
| upstream presence { | ||||
|     server unix:/run/matrix-synapse/inbound_presence.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Push rules | ||||
| upstream push_rules { | ||||
|     server unix:/run/matrix-synapse/inbound_push_rules.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # End of the stream workers, the following workers are of a "normal" type | ||||
| 
 | ||||
| # Media | ||||
| # If more than one media worker is used, they *must* all run on the same machine | ||||
| upstream media { | ||||
|     server unix:/run/matrix-synapse/inbound_mediaworker.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Synchronisation by clients: | ||||
| 
 | ||||
| # Normal sync. Not particularly heavy, but happens a lot | ||||
| upstream normal_sync { | ||||
|     # Use the username mapper result for hash key | ||||
|     hash $mxid_localpart consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_normal_sync3.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| # Initial sync | ||||
| # Much heavier than a normal sync, but happens less often | ||||
| upstream initial_sync { | ||||
|     # Use the username mapper result for hash key | ||||
|     hash $mxid_localpart consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_initial_sync1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_initial_sync2.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Login | ||||
| upstream login { | ||||
|     server unix:/run/matrix-synapse/inbound_login.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Clients | ||||
| upstream client { | ||||
|     hash $mxid_localpart consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_clientworker1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_clientworker2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_clientworker3.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_clientworker4.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Federation | ||||
| # "Normal" federation, balanced round-robin over 4 workers. | ||||
| upstream incoming_federation { | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| # Inbound federation requests, need to be balanced by IP-address, but can go | ||||
| # to the same pool of workers as the other federation stuff. | ||||
| upstream federation_requests { | ||||
|     hash $remote_addr consistent; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader1.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader2.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader3.sock max_fails=0; | ||||
|     server unix:/run/matrix-synapse/inbound_federation_reader4.sock max_fails=0; | ||||
|     keepalive 10; | ||||
| } | ||||
| 
 | ||||
| # Main thread for all the rest | ||||
| upstream inbound_main { | ||||
|   server unix:/run/matrix-synapse/inbound_main.sock max_fails=0; | ||||
|   keepalive 10; | ||||
| } | ||||
|  | @ -75,10 +75,8 @@ Make sure you add these lines under the one that gives access to the postgres | |||
| superuser, the first line. | ||||
| 
 | ||||
| 
 | ||||
| # Tuning {#tuning} | ||||
| # Tuning | ||||
| 
 | ||||
| This is for later, check [Tuning your PostgreSQL Server](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server) | ||||
| on the PostgreSQL wiki. | ||||
| 
 | ||||
| For tuning in the scenario with [Synapse workers](../synapse/workers), see [this | ||||
| useful site](https://tcpipuk.github.io/postgres/tuning/index.html). | ||||
|  |  | |||
|  | @ -1,33 +0,0 @@ | |||
| # Synapse-admin | ||||
| 
 | ||||
| This is the webgui for Synapse. | ||||
| 
 | ||||
| Installation can be done in 3 ways | ||||
| ([see Github](https://github.com/Awesome-Technologies/synapse-admin)), we'll | ||||
| pick the easiest one: using the precompiled tar. | ||||
| 
 | ||||
| Unpack it under `/var/www`, link `synapse-admin` to the directory that the | ||||
| archive creates. This is to make sure you can easily unpack a newer version, | ||||
| prepare that, and then change the symlink. | ||||
| 
 | ||||
| ``` | ||||
| # ls -l /var/www | ||||
| total 8 | ||||
| drwxr-xr-x 2 root root 4096 Nov  4 18:05 html | ||||
| lrwxrwxrwx 1 root root   20 Nov 18 13:24 synapse-admin -> synapse-admin-0.10.3 | ||||
| drwxr-xr-x 5 root root 4096 Nov 18 15:54 synapse-admin-0.10.3 | ||||
| ``` | ||||
| 
 | ||||
| We use 0.10.3, but point nginx to '/var/www/synapse-admin'. Configuring nginx | ||||
| is fairly straightforward, [see here](../nginx/README.md#synapse-admin). | ||||
| 
 | ||||
| You should probably restrict Synapse-Admin to your own Synapse-server, instead | ||||
| of letting users fill in whatever they want. Do this by adding this bit to | ||||
| `config.json`. In our config we've moved that file to | ||||
| `/etc/synapse-admin` and link to that from `/var/www/synapse-admin`. | ||||
| 
 | ||||
| ``` | ||||
| { | ||||
|   "restrictBaseUrl": "https://matrix.example.com" | ||||
| } | ||||
| ``` | ||||
|  | @ -13,7 +13,7 @@ documentation](https://element-hq.github.io/synapse/latest/setup/installation.ht | |||
| ``` | ||||
| apt install -y lsb-release wget apt-transport-https build-essential python3-dev libffi-dev \ | ||||
|                python3-pip python3-setuptools sqlite3 \ | ||||
|                libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev git python3-jinja2 | ||||
|                libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev | ||||
| 
 | ||||
| wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg | ||||
| 
 | ||||
|  | @ -30,15 +30,7 @@ be configured with yaml-files in this directory. | |||
| 
 | ||||
| Configure the domain you wish to use in `/etc/matrix-synapse/conf.d/server_name.yaml`. | ||||
| What you configure here will also be the global part of your Matrix handles | ||||
| (the part after the colon). Also add the URL clients should connect to: | ||||
| 
 | ||||
| ``` | ||||
| server_name: example.com | ||||
| public_baseurl: https://matrix.example.com/ | ||||
| ``` | ||||
| 
 | ||||
| The `public_baseurl` will probably be different than the `server_name`, see | ||||
| also [Delegation and DNS](#Delegation). | ||||
| (the part after the colon). | ||||
| 
 | ||||
| You now have a standard Matrix server that uses sqlite. You really don't want | ||||
| to use this in production, so probably want to replace this with PostgreSQL. | ||||
|  | @ -51,7 +43,7 @@ There are two different ways to configure Synapse, documented here: | |||
| We'll use Synapse, using the workers architecture to make it scalable, flexible and reusable. | ||||
| 
 | ||||
| 
 | ||||
| # Listeners | ||||
| ## Listeners | ||||
| 
 | ||||
| A fresh installation configures one listener, for both client and federation | ||||
| traffic. This listens on port 8008 on localhost (IPv4 and IPv6) and does not | ||||
|  | @ -69,6 +61,9 @@ listeners: | |||
|         compress: false | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| # Database | ||||
| 
 | ||||
| The default installation leaves you with an sqlite3 database. Nice for experimenting, but | ||||
|  | @ -146,501 +141,4 @@ This will ask for a password, choose a safe one. | |||
| Logging is configured in `log.yaml`. Some logging should go to systemd, the | ||||
| more specific logging to Synapse's own logfile(s). | ||||
| 
 | ||||
| This part is yet to be completed, the default configuration is adequate for | ||||
| most cases. | ||||
| 
 | ||||
| # Delegation and DNS {#Delegation} | ||||
| 
 | ||||
| If you run your server under a different FQDN than just the domain name you | ||||
| want to use, you need to delegate: point from your domain to the server. | ||||
| 
 | ||||
| Example. You want to use example.com for your domain, but your server is | ||||
| called matrix.example.com. To make that work, you need to serve 2 bits of | ||||
| JSON-code on example.com to point clients and servers to the correct | ||||
| machine: matrix.example.com. | ||||
| 
 | ||||
| Pointing servers to the correct server is done by publishing this bit of | ||||
| JSON-code under `https://example.com/.well-known/matrix/server`: | ||||
| 
 | ||||
| ``` | ||||
| { | ||||
|   "m.server": "matrix.example.com" | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| Pointing clients to the correct server needs this at | ||||
| `https://example.com/.well-known/matrix/client`: | ||||
| 
 | ||||
| ``` | ||||
| { | ||||
|   "m.homeserver": {"base_url": "https://matrix.example.com"} | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| Very important: both names (example.com and matrix.example.com) must be A | ||||
| and/or AAAA records in DNS, not CNAME. | ||||
| 
 | ||||
| You can also publish support data: administrator, security officer, helpdesk | ||||
| page. Publish that as `.well-known/matrix/support`. | ||||
| 
 | ||||
| See the included files for more elaborate examples, and check | ||||
| [nginx](../nginx) for details about how to publish this data. | ||||
| 
 | ||||
| 
 | ||||
| # E-mail {#Email} | ||||
| 
 | ||||
| Synapse should probably be able to send out e-mails; notifications for those | ||||
| who want that, and password reset for those who need one. | ||||
| 
 | ||||
| You configure this under the section `email` (yes, really).  | ||||
| 
 | ||||
| First of all, you need an SMTP-server that is configured to send e-mail for | ||||
| your domain. Configuring that is out of scope, we'll assume we can use the | ||||
| server `smtp.example.com`. | ||||
| 
 | ||||
| Configure this in `conf.d/email.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| email: | ||||
|   smtp_host: smtp.example.com | ||||
|   smtp_port: 465 | ||||
|   smtp_user: matrix@example.com | ||||
|   smtp_pass: SuperSecretPassword | ||||
|   force_tls: true | ||||
|   notif_from: "Your Matrix server <matrix@example.com>" | ||||
| ``` | ||||
| 
 | ||||
| This configures an SMTP-connection with SSL (port 465, `force_tls`). See Matrix' | ||||
| [email documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=require_transport_security#email) | ||||
| for more information. | ||||
| 
 | ||||
| 
 | ||||
| # Media store {#mediastore} | ||||
| 
 | ||||
| Files and avatars need to be stored somewhere, we configure these options in | ||||
| `conf.d/mediastore.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| media_store_path: /var/lib/matrix-synapse/media | ||||
| enable_authenticated_media: true | ||||
| max_upload_size: 50M | ||||
| url_preview_enabled: true | ||||
| url_preview_ip_range_blacklist: | ||||
|   - '127.0.0.0/8' | ||||
|   - '10.0.0.0/8' | ||||
|   - '172.16.0.0/12' | ||||
|   - '192.168.0.0/16' | ||||
|   - '100.64.0.0/10' | ||||
|   - '192.0.0.0/24' | ||||
|   - '169.254.0.0/16' | ||||
|   - '192.88.99.0/24' | ||||
|   - '198.18.0.0/15' | ||||
|   - '192.0.2.0/24' | ||||
|   - '198.51.100.0/24' | ||||
|   - '203.0.113.0/24' | ||||
|   - '224.0.0.0/4' | ||||
|   - '::1/128' | ||||
|   - 'fe80::/10' | ||||
|   - 'fc00::/7' | ||||
|   - '2001:db8::/32' | ||||
|   - 'ff00::/8' | ||||
|   - 'fec0::/10' | ||||
| ``` | ||||
| 
 | ||||
| These are a few sane (?) defaults, check [Matrix' documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=media_store_path#media-store) | ||||
| for many more options. | ||||
| 
 | ||||
| 
 | ||||
| # Homeserver blocking {#blocking} | ||||
| 
 | ||||
| This is a series of options that can be used to block and/or limit users. The | ||||
| whole list of options can be found in [Matrix' documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=mau_stats_only%3A#homeserver-blocking), | ||||
| we're going to pick out a few useful ones. | ||||
| 
 | ||||
| Let's configure these options in `conf.d/homeserver_blocking.yaml`. | ||||
| 
 | ||||
| ``` | ||||
| admin_contact: matrixadmin@example.com | ||||
| mau_stats_only: true | ||||
| max_avatar_size: 2M | ||||
| allowed_avatar_mimetypes: | ||||
|   - "image/png" | ||||
|   - "image/jpeg" | ||||
|   - "image/gif" | ||||
| forgotten_room_retention_period: 7d | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Authentication {#authentication} | ||||
| 
 | ||||
| Logging in can be done in basically two ways: an internal or external | ||||
| database. Let's start with the first: users and their passwords are stored in | ||||
| Synapse's database. | ||||
| 
 | ||||
| We use `conf.d/authentication.yaml` to configure this stuff. | ||||
| 
 | ||||
| ``` | ||||
| password_config: | ||||
|   policy: | ||||
|     enabled: true | ||||
|     localdb_enabled: true | ||||
|     pepper: <random string> | ||||
|     minimum_length: 8 | ||||
|     require_digit: true | ||||
|     require_symbol: true | ||||
|     require_lowercase: true | ||||
|     require_uppercase: true | ||||
| ``` | ||||
| 
 | ||||
| With this bit, we configure Synapse to let users pick and change their own | ||||
| passwords, as long as they meet the configured conditions. Mind you: `pepper` is | ||||
| a secret random string that should *NEVER* be changed after initial setup.  | ||||
| 
 | ||||
| But in a bigger environment you'll probably want to use some authentication | ||||
| backend, such as LDAP. LDAP is configured by means of a module (see | ||||
| [Synapse LDAP auth Provider](https://github.com/matrix-org/matrix-synapse-ldap3/) | ||||
| on Github). | ||||
| 
 | ||||
| Configuring Synapse to use LDAP, would be something like this: | ||||
| 
 | ||||
| ``` | ||||
| password_config: | ||||
|   policy: | ||||
|     enabled: only_for_reauth | ||||
|     localdb_enabled: false | ||||
| 
 | ||||
| password_providers: | ||||
|   - module: "ldap_auth_provider.LdapAuthProvider" | ||||
|     config: | ||||
|       enabled: true | ||||
|       uri: "ldap://ldap.example.com:389" | ||||
|       start_tls: true | ||||
|       base: "ou=users,dc=example,dc=com" | ||||
|       attributes: | ||||
|          uid: "uid" | ||||
|          mail: "mail" | ||||
|          name: "cn" | ||||
|       filter: "(&(objectClass=posixAccount)(accountStatus=active))" | ||||
| 
 | ||||
|       mode: "search" | ||||
|       bind_dn: "cn=matrix,ou=service,dc=example,dc=com" | ||||
|       bind_password: "<very secure password>" | ||||
| ``` | ||||
| 
 | ||||
| This would connect to ldap.example.com over TLS, and authenticate users that | ||||
| live under `ou=users,dc=example,dc=com` and that are active Posix | ||||
| accounts. Users will not be able to change their passwords via Matrix, they | ||||
| have to do that in LDAP. | ||||
| 
 | ||||
| The bottom 3 lines enable search mode, necessary to find users' displayname | ||||
| and e-mail address. These values are in LDAP under the attributes "mail" and | ||||
| "cn" (completely dependent on your LDAP DIT of course, this setup is common | ||||
| for OpenLDAP). The bind_dn and bind_password are for the account Synapse can | ||||
| use to connect and search, necessary if anonymous access is prohibited. | ||||
| 
 | ||||
| 
 | ||||
| # Server configuration {#serverconfig} | ||||
| 
 | ||||
| See [Define your homeserver name and other base options](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=require_auth_for_profile_requests#server) | ||||
| in the Synapse documentation. | ||||
| 
 | ||||
| It would be logical to put the next options under `conf.d/server.yaml`, but | ||||
| Debian insists on `conf.d/server_name.yaml` existing and containing the name | ||||
| of the domain. So we'll use that file for the next options as well. Add these | ||||
| options: | ||||
| 
 | ||||
| ``` | ||||
| presence: | ||||
|   enabled: true | ||||
|   include_offline_users_on_sync: false | ||||
| 
 | ||||
| require_auth_for_profile_requests: true | ||||
| allow_public_rooms_over_federation: true | ||||
| 
 | ||||
| ip_range_blacklist: | ||||
|   - '127.0.0.0/8' | ||||
|   - '10.0.0.0/8' | ||||
|   - '172.16.0.0/12' | ||||
|   - '192.168.0.0/16' | ||||
|   - '100.64.0.0/10' | ||||
|   - '192.0.0.0/24' | ||||
|   - '169.254.0.0/16' | ||||
|   - '192.88.99.0/24' | ||||
|   - '198.18.0.0/15' | ||||
|   - '192.0.2.0/24' | ||||
|   - '198.51.100.0/24' | ||||
|   - '203.0.113.0/24' | ||||
|   - '224.0.0.0/4' | ||||
|   - '::1/128' | ||||
|   - 'fe80::/10' | ||||
|   - 'fc00::/7' | ||||
|   - '2001:db8::/32' | ||||
|   - 'ff00::/8' | ||||
|   - 'fec0::/10' | ||||
| 
 | ||||
| filter_timeline_limit: 500 | ||||
| delete_stale_devices_after: 1y | ||||
| ``` | ||||
| 
 | ||||
| These should be reasonable defaults, but do check the [Server block](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#server) | ||||
| in Synapse's documentation for more options and information. | ||||
| 
 | ||||
| 
 | ||||
| # Registration {#Registration} | ||||
| 
 | ||||
| Registration of new users is configured under `conf.d/registration.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| enable_registration: false | ||||
| enable_registration_without_verification: false | ||||
| registrations_require_3pid: email | ||||
| registration_shared_secret: <long random string> | ||||
| allow_guest_access: false | ||||
| 
 | ||||
| enable_set_displayname: false | ||||
| enable_3pid_changes: false | ||||
| ``` | ||||
| 
 | ||||
| The last two lines prevent users from changing their displayname and 3pid-data | ||||
| (i.e. e-mail address and phone number). In many cases you'd want them to be | ||||
| able to set these, of course. But when you use LDAP, which provides these | ||||
| values, you don't want users to change those. | ||||
| 
 | ||||
| See for more options [Synapse's documentation](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#registration). | ||||
| 
 | ||||
| 
 | ||||
| # TURN | ||||
| 
 | ||||
| Check for more information about [how to configure the TURN | ||||
| server](../coturn) or [LiveKit](../element-call#livekit). You probably want | ||||
| LiveKit, but read on if you choose coturn. | ||||
| 
 | ||||
| It might be useful to use both coturn and LiveKit, so as to support both | ||||
| legacy and EC calls, but you'd need to tweak the configurations so that they | ||||
| don't bite each other. | ||||
| 
 | ||||
| Once you've set up your TURN server, configure it in | ||||
| Synapse, in `conf.d/turn.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| turn_shared_secret: "<long random string>" | ||||
| turn_uris: | ||||
|   - "turn:turn.matrixdev.example.com?transport=udp" | ||||
|   - "turn:turn.matrixdev.example.com?transport=tcp" | ||||
| turn_user_lifetime: 86400000 | ||||
| turn_allow_guests: true | ||||
| ``` | ||||
| 
 | ||||
| Restart Synapse to activate this bit. | ||||
| 
 | ||||
| 
 | ||||
| # Consent Tracking {#consenttracking} | ||||
| 
 | ||||
| As administrator you sometimes need to push a message to all your users. See | ||||
| the [Synapse documentation](https://element-hq.github.io/synapse/latest/server_notices.html) | ||||
| to see how to configure that. | ||||
| 
 | ||||
| It's also necessary for moderation ([see Draupnir](../draupnir)). | ||||
| 
 | ||||
| 
 | ||||
| ## Server Notices | ||||
| 
 | ||||
| Server notices allow administrators to send messages to users, much like the | ||||
| `wall` functionality in UNIX/Linux. | ||||
| 
 | ||||
| Add this bit of info to `conf.d/server_notices.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| server_notices: | ||||
|   system_mxid_localpart: server | ||||
|   system_mxid_display_name: "Server Notices" | ||||
| # system_mxid_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" | ||||
|   room_name: "Server Notices" | ||||
| # room_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" | ||||
|   room_topic: "Room used by your server admin to notify you of important | ||||
| information" | ||||
|   auto_join: true | ||||
| ``` | ||||
| 
 | ||||
| This means that the user sending the messages (who isn't really a user anyway) | ||||
| is `@server:example.com`, with the display name `Server Notices`. The room that users receive | ||||
| these messages in is called the same. The room will be created if it doesn't | ||||
| yet exist, every user that receives a server message will be put in a room | ||||
| with that name. | ||||
| 
 | ||||
| Every user gets his own room, so if you send a server notice to 100 users, | ||||
| there will be (at least) 100 rooms by that name, all containing 1 user. | ||||
| 
 | ||||
| The option `auto_join` means that users will automatically join the room as | ||||
| soon as it's created. They can leave afterwards, but they'll be put into it again | ||||
| as soon as they receive another server message. | ||||
| 
 | ||||
| The two commented out options are the avatars for user and room. This is a bit | ||||
| tricky. You'll need to upload an image to a room first, so that it's present | ||||
| in the media store. Then you can refer to it by the ID it gets, in the way | ||||
| shown above. These avatars will only be set or changed when you send a server | ||||
| notice. | ||||
| 
 | ||||
| Important bit: you must upload these pictures to an unencrypted room. Pictures | ||||
| in an encrypted room are... well... encrypted, and that causes a problem for | ||||
| the thumbnailer. Pictures in encrypted rooms are stored as MIME type | ||||
| `application/octet-stream`, you want one of the formats you configured under | ||||
| [Homeserver Blocking](#blocking). Or, if you haven't defined a whitelist, at | ||||
| least an image mimetype... | ||||
| 
 | ||||
| Apparently this was a bug that's supposed to be fixed in Synapse 1.20, but we | ||||
| haven't tested that yet. | ||||
| 
 | ||||
| You can find the ID of the picture in the database (table `local_media_repository`) | ||||
| or, more conveniently, in [Synapse-Admin](../synapse-admin), which is also | ||||
| where you'll want to go if you want to send a server notice. | ||||
| 
 | ||||
| In Synapse-Admin, open the User tab, select the user(s) you want to send a | ||||
| notice to, and click "Send Server Notices". | ||||
| 
 | ||||
| If the result is that you're returned to the login screen of Synapse-Admin, | ||||
| there was an error sending the notice. Check the Synapse logs. | ||||
| 
 | ||||
| 
 | ||||
| ## Consent template | ||||
| 
 | ||||
| You can force your users to accept an agreement before you let them on your | ||||
| machine, see the [Synapse Documentation](https://element-hq.github.io/synapse/latest/consent_tracking.html#support-in-synapse-for-tracking-agreement-to-server-terms-and-conditions). | ||||
| 
 | ||||
| First, make the directory where you want Synapse to search for the document, | ||||
| we create the directory `consent_policy`: | ||||
| 
 | ||||
| 
 | ||||
| ``` | ||||
| mkdir -p /var/lib/matrix-synapse/consent_policy/en | ||||
| ``` | ||||
| 
 | ||||
| Note the `en` directory (already created by the command above): every document | ||||
| is assumed to be in English. Support for other languages is on the wish list. | ||||
| 
 | ||||
| Create a Jinja2 template with the texts you want: the text users have to agree | ||||
| to before they can use the service, and the text users that have already | ||||
| agreed will see. Something like this: | ||||
| 
 | ||||
| ``` | ||||
| <!doctype html> | ||||
| <html lang="en"> | ||||
|   <head> | ||||
|     <title>Example End User Policy</title> | ||||
|   </head> | ||||
|   <body> | ||||
|   {% if has_consented %} | ||||
|     <p> | ||||
|       You have already accepted the Example End User Policy. | ||||
|     </p> | ||||
|   {% else %} | ||||
| <h1>Example End User Policy</h1> | ||||
| 
 | ||||
| These are the terms under which you can use this service. Unless you accept these terms, you | ||||
| will not be allowed to send any messages. | ||||
| 
 | ||||
| <ol> | ||||
|   <li>You will not be abusive to other users, be they on this server or on an other. | ||||
|   <li>You will not do other nasty stuff. | ||||
|   <li>Basically: you will behave like a good person. | ||||
| </ol> | ||||
| 
 | ||||
| We promise you a few things too: | ||||
| 
 | ||||
| <ol> | ||||
|   <li>We'll keep your data safe | ||||
|   <li>We won't snoop on you | ||||
|   <li>We'll only turn you in with the authorities if you do nasty stuff. | ||||
| </ol> | ||||
| 
 | ||||
| If you accept these terms, you can use this system. | ||||
|     {% if not public_version %} | ||||
|       <!-- The variables used here are only provided when the 'u' param is given to the homeserver --> | ||||
|       <form method="post" action="consent"> | ||||
|         <input type="hidden" name="v" value="{{version}}"/> | ||||
|         <input type="hidden" name="u" value="{{user}}"/> | ||||
|         <input type="hidden" name="h" value="{{userhmac}}"/> | ||||
|         <input type="submit" value="I accept"/> | ||||
|       </form> | ||||
|     {% endif %} | ||||
|   {% endif %} | ||||
|   </body> | ||||
| </html> | ||||
| ``` | ||||
| 
 | ||||
| The name of this document needs to be a version name with the extension `.html`. | ||||
| Say you want your users to accept version 0.1, the file must be named | ||||
| 0.1.html. This version is referred to in the configuration. | ||||
| 
 | ||||
| After a user has agreed to this policy, he is presented with `success.html`, | ||||
| which you will also have to make (although it's not mentioned in the | ||||
| documentation). This doesn't have to be very complicated. | ||||
| 
 | ||||
| ``` | ||||
| <!doctype html> | ||||
| <html lang="en"> | ||||
|   <head> | ||||
|     <title>Example End User Policy</title> | ||||
|   </head> | ||||
|   <body> | ||||
| <p>You have agreed to our End User Policy, you can now use our service.</p> | ||||
| 
 | ||||
| <p>Have fun!</p> | ||||
|   </body> | ||||
| </html> | ||||
| ``` | ||||
| 
 | ||||
| We now have the texts ready, time to configure Synapse to use it. | ||||
| 
 | ||||
| Create a `form_secret`: | ||||
| 
 | ||||
| ``` | ||||
| pwgen -csny 30 1 | ||||
| ``` | ||||
| 
 | ||||
| Add this bit to `conf.d/server_notices.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| form_secret: "<previously generated secret>" | ||||
| user_consent: | ||||
|   require_at_registration: true | ||||
|   policy_name: "Example End User Policy" | ||||
|   template_dir: consent_policy | ||||
|   version: <version> | ||||
|   server_notice_content: | ||||
|     msgtype: m.text | ||||
|     body: >- | ||||
|       You have to agree to our End User Policy before you can use this | ||||
|       service. Please read and accept it at %(consent_uri)s. | ||||
|   block_events_error: >- | ||||
|     You haven't accepted the End User Policy yet, so you can't post any | ||||
|     messages yet. Please read and accept the policy at %(consent_uri)s. | ||||
| ``` | ||||
| 
 | ||||
| The last bit is to enable consent tracking on all listeners where `client` is | ||||
| active. We have only one listener, so we add `consent` to that: | ||||
| 
 | ||||
| ``` | ||||
| listeners: | ||||
|   - port: 8008 | ||||
|     tls: false | ||||
|     type: http | ||||
|     x_forwarded: true | ||||
|     bind_addresses: ['::1', '127.0.0.1'] | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
|         compress: false | ||||
| ``` | ||||
| 
 | ||||
| Restart Synapse for these changes to take effect. | ||||
| 
 | ||||
| If you update your policy, you'll have to copy the current one to a new | ||||
| version, edit that (e.g. `0.2.html`) and change the `version` option to the | ||||
| new version number. Restart Synapse after that. Your users will all have to | ||||
| agree to the new policy. | ||||
| 
 | ||||
| The options `server_notice_content` and `block_events_error` do not seem to be | ||||
| used, this is something that needs to be investigated. | ||||
|  |  | |||
|  | @ -1,22 +0,0 @@ | |||
| # Authentication stuff | ||||
| 
 | ||||
| password_config: | ||||
|   policy: | ||||
|     enabled: only_for_reauth | ||||
|     localdb_enabled: false | ||||
|      | ||||
| password_providers: | ||||
|   - module: "ldap_auth_provider.LdapAuthProvider" | ||||
|     config: | ||||
|       enabled: true | ||||
|       uri: "ldap://ldap.example.com" | ||||
|       start_tls: true | ||||
|       mode: "search" | ||||
|       base: "ou=users,o=Example,dc=example,dc=eu" | ||||
|       attributes: | ||||
|          uid: "uid" | ||||
|          mail: "mail" | ||||
|          name: "cn" | ||||
|       filter: "(&(objectClass=posixAccount)(accountStatus=active))" | ||||
|       bind_dn: "cn=matrix,ou=service,dc=example,dc=com" | ||||
|       bind_password: "<very secure password>" | ||||
|  | @ -1,19 +0,0 @@ | |||
| experimental_features: | ||||
|   # MSC3266: Room summary API. Used for knocking over federation | ||||
|   msc3266_enabled: true | ||||
| 
 | ||||
| # The maximum allowed duration by which sent events can be delayed, as | ||||
| # per MSC4140. | ||||
| max_event_delay_duration: 24h | ||||
| 
 | ||||
| rc_message: | ||||
|   # This needs to match at least the heart-beat frequency plus a bit of headroom | ||||
|   # Currently the heart-beat is every 5 seconds which translates into a rate of 0.2s | ||||
|   per_second: 0.5 | ||||
|   burst_count: 30 | ||||
| 
 | ||||
| extra_well_known_client_content: | ||||
|   org.matrix.msc4143.rtc_foci: | ||||
|     type: livekit | ||||
|     livekit_service_url: https://livekit.example.com | ||||
| 
 | ||||
|  | @ -1,9 +0,0 @@ | |||
| database: | ||||
|   name: psycopg2 | ||||
|   args: | ||||
|     user: synapse | ||||
|     password: <secure password> | ||||
|     dbname: synapse | ||||
|     host: /var/run/postgresql | ||||
|     cp_min: 5 | ||||
|     cp_max: 10 | ||||
|  | @ -1,9 +0,0 @@ | |||
| # This takes care of sending e-mail | ||||
| 
 | ||||
| email: | ||||
|   smtp_host: smtp.example.com | ||||
|   smtp_port: 465 | ||||
|   smtp_user: matrix@example.com | ||||
|   smtp_pass: <secure password> | ||||
|   force_tls: true | ||||
|   notif_from: "Your Matrix server <matrix@example.com>" | ||||
|  | @ -1,11 +0,0 @@ | |||
| # Various settings for blocking stuff. | ||||
| # See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=mau_stats_only%3A#homeserver-blocking | ||||
| 
 | ||||
| admin_contact: admin@example.com | ||||
| mau_stats_only: true | ||||
| max_avatar_size: 2M | ||||
| allowed_avatar_mimetypes: | ||||
|   - "image/png" | ||||
|   - "image/jpeg" | ||||
|   - "image/gif" | ||||
| forgotten_room_retention_period: 7d | ||||
|  | @ -1,5 +0,0 @@ | |||
| # This file contains secrets | ||||
| 
 | ||||
| signing_key_path: "/etc/matrix-synapse/homeserver.signing.key" | ||||
| macaroon_secret_key: <secure key> | ||||
| registration_shared_secret: <secure key> | ||||
|  | @ -1,29 +0,0 @@ | |||
| # Media stuff | ||||
| # See https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=media_store_path#media-store | ||||
| 
 | ||||
| media_store_path: /var/lib/matrix-synapse/media | ||||
| enable_authenticated_media: true | ||||
| max_upload_size: 50M | ||||
| url_preview_enabled: true | ||||
| url_preview_ip_range_blacklist: | ||||
|   - '127.0.0.0/8' | ||||
|   - '10.0.0.0/8' | ||||
|   - '172.16.0.0/12' | ||||
|   - '192.168.0.0/16' | ||||
|   - '100.64.0.0/10' | ||||
|   - '192.0.0.0/24' | ||||
|   - '169.254.0.0/16' | ||||
|   - '192.88.99.0/24' | ||||
|   - '198.18.0.0/15' | ||||
|   - '192.0.2.0/24' | ||||
|   - '198.51.100.0/24' | ||||
|   - '203.0.113.0/24' | ||||
|   - '224.0.0.0/4' | ||||
|   - '::1/128' | ||||
|   - 'fe80::/10' | ||||
|   - 'fc00::/7' | ||||
|   - '2001:db8::/32' | ||||
|   - 'ff00::/8' | ||||
|   - 'fec0::/10' | ||||
| 
 | ||||
| dynamic_thumbnails: true | ||||
|  | @ -1,5 +0,0 @@ | |||
| # This file is autogenerated, and will be recreated on upgrade if it is deleted. | ||||
| # Any changes you make will be preserved. | ||||
| 
 | ||||
| # Whether to report homeserver usage statistics. | ||||
| report_stats: true | ||||
|  | @ -1,43 +0,0 @@ | |||
| # This file is autogenerated, and will be recreated on upgrade if it is deleted. | ||||
| # Any changes you make will be preserved. | ||||
| 
 | ||||
| # The domain name of the server, with optional explicit port. | ||||
| # This is used by remote servers to connect to this server, | ||||
| # e.g. matrix.org, localhost:8080, etc. | ||||
| # This is also the last part of your UserID. | ||||
| # | ||||
| server_name: example.com | ||||
| 
 | ||||
| # The rest is our local configuration: | ||||
| public_baseurl: https://matrix.example.com/ | ||||
| 
 | ||||
| presence: | ||||
|   enabled: true | ||||
|   include_offline_users_on_sync: false | ||||
| 
 | ||||
| require_auth_for_profile_requests: true | ||||
| allow_public_rooms_over_federation: true | ||||
| 
 | ||||
| ip_range_blacklist: | ||||
|   - '127.0.0.0/8' | ||||
|   - '10.0.0.0/8' | ||||
|   - '172.16.0.0/12' | ||||
|   - '192.168.0.0/16' | ||||
|   - '100.64.0.0/10' | ||||
|   - '192.0.0.0/24' | ||||
|   - '169.254.0.0/16' | ||||
|   - '192.88.99.0/24' | ||||
|   - '198.18.0.0/15' | ||||
|   - '192.0.2.0/24' | ||||
|   - '198.51.100.0/24' | ||||
|   - '203.0.113.0/24' | ||||
|   - '224.0.0.0/4' | ||||
|   - '::1/128' | ||||
|   - 'fe80::/10' | ||||
|   - 'fc00::/7' | ||||
|   - '2001:db8::/32' | ||||
|   - 'ff00::/8' | ||||
|   - 'fec0::/10' | ||||
| 
 | ||||
| filter_timeline_limit: 500 | ||||
| delete_stale_devices_after: 1y | ||||
|  | @ -1,26 +0,0 @@ | |||
| # Necessary for server notices, and moderation | ||||
| 
 | ||||
| server_notices: | ||||
|   system_mxid_localpart: server | ||||
|   system_mxid_display_name: "Server Notices" | ||||
|   system_mxid_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" | ||||
|   room_name: "Server Notices" | ||||
|   room_avatar_url: "mxc://example.com/QBBZcaxfrrpvreGeNhqRaCjG" | ||||
|   room_topic: "Room used by your server admin to notify you of important information" | ||||
|   auto_join: true | ||||
| 
 | ||||
| user_consent: | ||||
|   require_at_registration: true | ||||
|   policy_name: "Example End User Policy" | ||||
|   template_dir: consent_policy | ||||
|   version: 0.2 | ||||
|   server_notice_content: | ||||
|     msgtype: m.text | ||||
|     body: >- | ||||
|       You have to agree to our End User Policy before you can use this | ||||
|       service. Please read and accept it at %(consent_uri)s. | ||||
|   block_events_error: >- | ||||
|     You haven't accepted the End User Policy yet, so you can't post any | ||||
|     messages yet. Please read and accept the policy at %(consent_uri)s. | ||||
| 
 | ||||
| form_secret: "<secure password>" | ||||
|  | @ -1,9 +0,0 @@ | |||
| # This configures the connection to the TURN server | ||||
| 
 | ||||
| turn_shared_secret: "<secure key>" | ||||
| turn_uris: | ||||
|   - "turn:turn.example.com?transport=udp" | ||||
|   - "turn:turn.example.com?transport=tcp" | ||||
| turn_user_lifetime: 86400000 | ||||
| turn_allow_guests: true | ||||
| 
 | ||||
|  | @ -1,34 +0,0 @@ | |||
| # Configuration file for Synapse. | ||||
| # | ||||
| # This is a YAML file: see [1] for a quick introduction. Note in particular | ||||
| # that *indentation is important*: all the elements of a list or dictionary | ||||
| # should have the same indentation. | ||||
| # | ||||
| # [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html | ||||
| # | ||||
| # For more information on how to configure Synapse, including a complete accounting of | ||||
| # each option, go to docs/usage/configuration/config_documentation.md or | ||||
| # https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html | ||||
| # | ||||
| # This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations. | ||||
| # server_name: "SERVERNAME" | ||||
| pid_file: "/var/run/matrix-synapse.pid" | ||||
| listeners: | ||||
|   - port: 8008 | ||||
|     tls: false | ||||
|     type: http | ||||
|     x_forwarded: true | ||||
|     bind_addresses: ['::1', '127.0.0.1'] | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
|         compress: false | ||||
| #database: | ||||
| #  name: sqlite3 | ||||
| #  args: | ||||
| #    database: /var/lib/matrix-synapse/homeserver.db | ||||
| log_config: "/etc/matrix-synapse/log.yaml" | ||||
| trusted_key_servers: | ||||
|   - server_name: "matrix.org" | ||||
							
								
								
									
										10
									
								
								matrix/synapse/monolithic/README.md
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								matrix/synapse/monolithic/README.md
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,10 @@ | |||
| --- | ||||
| gitea: none | ||||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Standard, monolithic configuration | ||||
| 
 | ||||
| This configuration will be enough for most installations. | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1,43 +0,0 @@ | |||
| <!doctype html> | ||||
| <html lang="en"> | ||||
|   <head> | ||||
|     <title>Example End User Policy</title> | ||||
|   </head> | ||||
|   <body> | ||||
|   {% if has_consented %} | ||||
|     <p> | ||||
|       You have already accepted the Example End User Policy. | ||||
|     </p> | ||||
|   {% else %} | ||||
| <h1>Example End User Policy</h1> | ||||
| 
 | ||||
| These are the terms under which you can use this service. Unless you accept these terms, you | ||||
| will not be allowed to send any messages. | ||||
| 
 | ||||
| <ol> | ||||
|   <li>You will not be abusive to other users, be they on this server or on an other. | ||||
|   <li>You will not do other nasty stuff. | ||||
|   <li>Basically: you will behave like a good person. | ||||
| </ol> | ||||
| 
 | ||||
| We promise you a few things too: | ||||
| 
 | ||||
| <ol> | ||||
|   <li>We'll keep your data safe | ||||
|   <li>We won't snoop on you | ||||
|   <li>We'll only turn you in with the authorities if you do nasty stuff. | ||||
| </ol> | ||||
| 
 | ||||
| If you accept these terms, you can use this system. | ||||
|     {% if not public_version %} | ||||
|       <!-- The variables used here are only provided when the 'u' param is given to the homeserver --> | ||||
|       <form method="post" action="consent"> | ||||
|         <input type="hidden" name="v" value="{{version}}"/> | ||||
|         <input type="hidden" name="u" value="{{user}}"/> | ||||
|         <input type="hidden" name="h" value="{{userhmac}}"/> | ||||
|         <input type="submit" value="I accept"/> | ||||
|       </form> | ||||
|     {% endif %} | ||||
|   {% endif %} | ||||
|   </body> | ||||
| </html> | ||||
|  | @ -1,11 +0,0 @@ | |||
| <!doctype html> | ||||
| <html lang="en"> | ||||
|   <head> | ||||
|     <title>Example End User Policy</title> | ||||
|   </head> | ||||
|   <body> | ||||
| <p>You have agreed to our End User Policy, you can now use our service.</p> | ||||
| 
 | ||||
| <p>Have fun!</p> | ||||
|   </body> | ||||
| </html> | ||||
|  | @ -1,12 +0,0 @@ | |||
| { | ||||
|     "m.homeserver": { | ||||
|         "base_url": "https://matrix.example.com" | ||||
|     }, | ||||
| 
 | ||||
|     "org.matrix.msc4143.rtc_foci":[ | ||||
|         { | ||||
|             "type": "livekit", | ||||
|             "livekit_service_url": "https://livekit.example.com" | ||||
|         } | ||||
|     ] | ||||
| } | ||||
|  | @ -1 +0,0 @@ | |||
| {"m.server": "matrix.example.com"} | ||||
|  | @ -1,17 +0,0 @@ | |||
| { | ||||
|     "contacts": [ | ||||
|         { | ||||
|             "email_address": "admin@example.com", | ||||
|             "matrix_id": "@john:example.com", | ||||
|             "role": "m.role.admin" | ||||
| 	}, | ||||
| 
 | ||||
|         { | ||||
|             "email_address": "security@example.com", | ||||
|             "matrix_id": "@bob:example.com", | ||||
|             "role": "m.role.security" | ||||
|         } | ||||
|     ], | ||||
| 
 | ||||
|     "support_page": "https://support.example.com/" | ||||
| } | ||||
|  | @ -3,591 +3,9 @@ gitea: none | |||
| include_toc: true | ||||
| --- | ||||
| 
 | ||||
| # Introduction to a worker-based setup | ||||
| # Advanced configuration with workers | ||||
| 
 | ||||
| Very busy servers are brought down because a single process can't keep up with | ||||
| the load. So you want to run several processes for different types of work. | ||||
| This configuration allows optimizing performance, and is meant for big, busy | ||||
| installations. | ||||
| 
 | ||||
| See this [Matrix blog](https://matrix.org/blog/2020/11/03/how-we-fixed-synapse-s-scalability/) | ||||
| for some background information. | ||||
| 
 | ||||
| The traditional Synapse setup is one monolithic piece of software that does | ||||
| everything. Joining a very busy room makes a bottleneck, as the server will | ||||
| spend all its cycles on synchronizing that room. | ||||
| 
 | ||||
| You can split the server into workers, that are basically Synapse servers | ||||
| themselves. Redirect specific tasks to them and you have several different | ||||
| servers doing all kinds of tasks at the same time. A busy room will no longer | ||||
| freeze the rest. | ||||
| 
 | ||||
| Workers communicate with each other via UNIX sockets and Redis. We choose | ||||
| UNIX sockets because they're much more efficient than network sockets. Of | ||||
| course, if you scale to more than one machine, you will need network sockets | ||||
| instead. | ||||
| 
 | ||||
| **Important note** | ||||
| 
 | ||||
| While the use of workers can drastically improve speed, the law of diminishing | ||||
| returns applies. Splitting off more and more workers will not further improve | ||||
| speed after a certain point. Plus: you need to understand what the most | ||||
| resource-consuming tasks are before you can start to plan how many workers for | ||||
| what tasks you need. | ||||
| 
 | ||||
| In this document we'll basically create a worker for every task, and several | ||||
| workers for a few heavy tasks, as an example. Your mileage may not only vary, it | ||||
| will. | ||||
| 
 | ||||
| Tuning the rest of the machine and network also counts, especially PostgreSQL. | ||||
| A well-tuned PostgreSQL can make a really big difference and should probably | ||||
| be considered even before configuring workers. | ||||
| 
 | ||||
| With workers, PostgreSQL's configuration should be changed accordingly: see | ||||
| [Tuning PostgreSQL for a Matrix Synapse | ||||
| server](https://tcpipuk.github.io/postgres/tuning/index.html) for hints and | ||||
| examples. | ||||
| 
 | ||||
| A worker-based Synapse is tailor-made, there is no one-size-fits-all approach. | ||||
| All we can do here is explain how things work, what to consider and how to | ||||
| build what you need by providing examples. | ||||
| 
 | ||||
| 
 | ||||
| # Redis | ||||
| 
 | ||||
| Workers need Redis as part of their communication, so our first step will be | ||||
| to install Redis. | ||||
| 
 | ||||
| ``` | ||||
| apt install redis-server | ||||
| ``` | ||||
| 
 | ||||
| For less overhead we use a UNIX socket instead of a network connection to | ||||
| localhost. Disable the TCP listener and enable the socket in | ||||
| `/etc/redis/redis.conf`: | ||||
| 
 | ||||
| ``` | ||||
| port 0 | ||||
| 
 | ||||
| unixsocket /run/redis/redis-server.sock | ||||
| unixsocketperm 770 | ||||
| ``` | ||||
| 
 | ||||
| Our matrix user (`matrix-synapse`) has to be able to read from and write to | ||||
| that socket, which is created by Redis and owned by `redis:redis`, so we add | ||||
| user `matrix-synapse` to the group `redis`. You may come up with a | ||||
| finer-grained permission solution, but for our example this will do. | ||||
| 
 | ||||
| ``` | ||||
| adduser matrix-synapse redis | ||||
| ``` | ||||
| 
 | ||||
| Restart Redis for these changes to take effect. Check the logs for error | ||||
| messages, verify that port 6379 is no longer listening, and that the socket | ||||
| file `/run/redis/redis-server.sock` exists. | ||||
| 
 | ||||
| Now point Synapse at Redis in `conf.d/redis.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| redis: | ||||
|   enabled: true | ||||
|   path: /run/redis/redis-server.sock | ||||
| ``` | ||||
| 
 | ||||
| Restart Synapse and check if it can connect to Redis via the socket, you should find log | ||||
| entries like this: | ||||
| 
 | ||||
| ``` | ||||
| synapse.replication.tcp.redis - 292 - INFO - sentinel - Connecting to redis server UNIXAddress('/run/redis/redis-server.sock') | ||||
| synapse.util.httpresourcetree - 56 - INFO - sentinel - Attaching <synapse.replication.http.ReplicationRestResource object at 0x7f95f850d150> to path b'/_synapse/replication' | ||||
| synapse.replication.tcp.redis - 126 - INFO - sentinel - Connected to redis | ||||
| synapse.replication.tcp.redis - 138 - INFO - subscribe-replication-0 - Sending redis SUBSCRIBE for ['matrix.example.com/USER_IP', 'matrix.example.com'] | ||||
| synapse.replication.tcp.redis - 141 - INFO - subscribe-replication-0 - Successfully subscribed to redis stream, sending REPLICATE command | ||||
| synapse.replication.tcp.redis - 146 - INFO - subscribe-replication-0 - REPLICATE successfully sent | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| # Synapse | ||||
| 
 | ||||
| Workers communicate with each other over sockets, that are all placed in one | ||||
| directory. These sockets are owned by `matrix-synapse:matrix-synapse`, so make | ||||
| sure nginx can write to them: add user `www-data` to group `matrix-synapse` | ||||
| and restart nginx. | ||||
| 
 | ||||
| Then, make sure systemd creates the directory for the sockets as soon as | ||||
| Synapse starts: | ||||
| 
 | ||||
| ``` | ||||
| systemctl edit matrix-synapse | ||||
| ``` | ||||
| 
 | ||||
| Now override parts of the `Service` stanza to add these two lines: | ||||
| 
 | ||||
| ``` | ||||
| [Service] | ||||
| RuntimeDirectory=matrix-synapse | ||||
| RuntimeDirectoryPreserve=yes | ||||
| ``` | ||||
| 
 | ||||
| The directory `/run/matrix-synapse` will be created as soon | ||||
| as Synapse starts, and will not be removed on restart or stop, because that | ||||
| would create problems with workers who suddenly lose their sockets. | ||||
| 
 | ||||
| Then we change Synapse from listening on `localhost:8008` to listening on a | ||||
| socket. We'll do most of our workers work in `conf.d/listeners.yaml`, so let's | ||||
| put the new listener configuration for the main process there. | ||||
| 
 | ||||
| Remove the `localhost:8008` stanza, and configure these two sockets: | ||||
| 
 | ||||
| ``` | ||||
| listeners: | ||||
|   - path: /run/matrix-synapse/inbound_main.sock | ||||
|     mode: 0660 | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
| 
 | ||||
|   - path: /run/matrix-synapse/replication_main.sock | ||||
|     mode: 0660 | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: | ||||
|         - replication  | ||||
| ``` | ||||
| 
 | ||||
| This means Synapse will create two sockets under `/run/matrix-synapse`: one | ||||
| for incoming traffic that is forwarded by nginx (`inbound_main.sock`), and one for | ||||
| communicating with all the other workers (`replication_main.sock`). | ||||
| 
 | ||||
| If you restart Synapse now, it won't do anything anymore, because nginx is | ||||
| still forwarding its traffic to `localhost:8008`. We'll get to nginx later, | ||||
| but for now you should change: | ||||
| 
 | ||||
| ``` | ||||
| proxy_pass http://localhost:8008; | ||||
| ``` | ||||
| 
 | ||||
| to | ||||
| 
 | ||||
| ``` | ||||
| proxy_pass http://unix:/run/matrix-synapse/inbound_main.sock; | ||||
| ``` | ||||
| 
 | ||||
| If you've done this, restart Synapse and nginx, and check if the sockets are created | ||||
| and have the correct permissions. | ||||
| 
 | ||||
| Synapse should work normally again, we've switched from network sockets to | ||||
| UNIX sockets, and added Redis. Now we'll create the actual workers. | ||||
| 
 | ||||
| 
 | ||||
| # Worker overview | ||||
| 
 | ||||
| Every worker is, in fact, a Synapse server, only with a limited set of tasks. | ||||
| Some tasks can be handled by a number of workers, others only by one. Every | ||||
| worker starts as a normal Synapse process, reading all the normal | ||||
| configuration files, and then a bit of configuration for the specific worker | ||||
| itself. | ||||
| 
 | ||||
| Workers need to communicate with each other and the main process, they do that | ||||
| via the `replication` sockets under `/run/matrix-synapse` and Redis. | ||||
| 
 | ||||
| Most workers also need a way to be fed traffic by nginx: they have an `inbound` | ||||
| socket for that, in the same directory. | ||||
| 
 | ||||
| Finally, all those replicating workers need to be registered in the main | ||||
| process: all workers and their replication sockets are listed in the `instance_map`. | ||||
| 
 | ||||
| 
 | ||||
| ## Types of workers | ||||
| 
 | ||||
| We'll make separate workers for almost every task, and several for the | ||||
| heaviest tasks: synchronising. An overview of what endpoints are to be | ||||
| forwarded to a worker is in [Synapse's documentation](https://element-hq.github.io/synapse/latest/workers.html#available-worker-applications). | ||||
| 
 | ||||
| We'll create the following workers: | ||||
| 
 | ||||
| * login | ||||
| * federation_sender | ||||
| * mediaworker | ||||
| * userdir | ||||
| * pusher | ||||
| * push_rules | ||||
| * typing | ||||
| * todevice | ||||
| * accountdata | ||||
| * presence | ||||
| * receipts | ||||
| * initial_sync: 1 and 2 | ||||
| * normal_sync: 1, 2 and 3 | ||||
| 
 | ||||
| Some of them are `stream_writers`, and the [documentation about | ||||
| stream_writers](https://element-hq.github.io/synapse/latest/workers.html#stream-writers) | ||||
| says: | ||||
| 
 | ||||
| ``` | ||||
| Note: The same worker can handle multiple streams, but unless otherwise documented, each stream can only have a single writer. | ||||
| ``` | ||||
| 
 | ||||
| So, stream writers must have unique tasks: you can't have two or more workers | ||||
| writing to the same stream. Stream writers have to be listed in `stream_writers`: | ||||
| 
 | ||||
| ``` | ||||
| stream_writers: | ||||
|   account_data: | ||||
|     - accountdata | ||||
|   presence: | ||||
|     - presence | ||||
|   receipts: | ||||
|     - receipts | ||||
|   to_device: | ||||
|     - todevice | ||||
|   typing: | ||||
|     - typing | ||||
|   push_rules: | ||||
|     - push_rules | ||||
| ``` | ||||
| 
 | ||||
| As you can see, we've given the stream workers the name of the stream they're | ||||
| writing to. We could combine all those streams into one worker, which would | ||||
| probably be enough for most instances. | ||||
| 
 | ||||
| We could define a worker with the name streamwriter and list it under all | ||||
| streams instead of a single worker for every stream. | ||||
| 
 | ||||
| Finally, we have to list all these workers under `instance_map`: their name | ||||
| and their replication socket: | ||||
| 
 | ||||
| ``` | ||||
| instance_map: | ||||
|   main: | ||||
|     path: "/run/matrix-synapse/replication_main.sock" | ||||
|   login: | ||||
|     path: "/run/matrix-synapse/replication_login.sock" | ||||
|   federation_sender: | ||||
|     path: "/run/matrix-synapse/replication_federation_sender.sock" | ||||
|   mediaworker: | ||||
|     path: "/run/matrix-synapse/replication_mediaworker.sock" | ||||
| ... | ||||
|   normal_sync1: | ||||
|     path: "unix:/run/matrix-synapse/replication_normal_sync1.sock" | ||||
|   normal_sync2: | ||||
|     path: "unix:/run/matrix-synapse/replication_normal_sync2.sock" | ||||
|   normal_sync3: | ||||
|     path: "unix:/run/matrix-synapse/replication_normal_sync3.sock" | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ## Defining a worker | ||||
| 
 | ||||
| Every worker starts with the normal configuration files, and then loads its | ||||
| own. We put those files under `/etc/matrix-synapse/workers`. You have to | ||||
| create that directory, and make sure Synapse can read them. Being | ||||
| professionally paranoid, we restrict access to that directory and the files in | ||||
| it: | ||||
| 
 | ||||
| ``` | ||||
| mkdir /etc/matrix-synapse/workers | ||||
| chown matrix-synapse:matrix-synapse /etc/matrix-synapse/workers | ||||
| chmod 750 /etc/matrix-synapse/workers | ||||
| ``` | ||||
| 
 | ||||
| We'll fill this directory with `yaml` files; one for each worker. | ||||
| 
 | ||||
| 
 | ||||
| ### Generic worker | ||||
| 
 | ||||
| Workers look very much the same, very little configuration is needed. This is | ||||
| what you need: | ||||
| 
 | ||||
| * name | ||||
| * replication socket (not every worker needs this) | ||||
| * inbound socket (not every worker needs this) | ||||
| * log configuration | ||||
| 
 | ||||
| One worker we use handles the login actions, this is how it's configured in | ||||
| `/etc/matrix-synapse/workers/login.yaml`: | ||||
| 
 | ||||
| ``` | ||||
| worker_app: "synapse.app.generic_worker" | ||||
| worker_name: "login" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/login.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
|   - path: "/run/matrix-synapse/inbound_login.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/replication_login.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| ``` | ||||
| 
 | ||||
| The first line defines the type of worker. In the past there were quite a few | ||||
| different types, but most of them have been phased out in favour of one | ||||
| generic worker. | ||||
| 
 | ||||
| The first listener is the socket where nginx sends all traffic related to logins | ||||
| to. You have to configure nginx to do that, we'll get to that later. | ||||
| 
 | ||||
| The `worker_log_config` defines how and where the worker logs. Of course you'll | ||||
| need to configure that too, see further. | ||||
| 
 | ||||
| The first `listener` is the inbound socket, that nginx uses to forward login | ||||
| related traffic to. Make sure nginx can write to this socket. The | ||||
| `resources` vary between workers. | ||||
| 
 | ||||
| The second `listener` is used for communication with the other workers and the | ||||
| main thread. The only `resource` it needs is `replication`. This socket needs | ||||
| to be listed in the `instance_map` in the main thread, the inbound socket does | ||||
| not. | ||||
| 
 | ||||
| Of course, if you need to scale up to the point where you need more than one | ||||
| machine, these listeners can no longer use UNIX sockets, but will have to use | ||||
| the network. This creates extra overhead, so you want to use sockets whenever | ||||
| possible. | ||||
| 
 | ||||
| 
 | ||||
| ### Media worker | ||||
| 
 | ||||
| The media worker is slightly different from the generic one. It doesn't use the | ||||
| `synapse.app.generic_worker`, but a specialised one: `synapse.app.media_repository`. | ||||
| To prevent the main process from handling media itself, you have to explicitly | ||||
| tell it to leave that to the worker, by adding this to the configuration (in | ||||
| our setup `conf.d/listeners.yaml`): | ||||
| 
 | ||||
| ``` | ||||
| enable_media_repo: false | ||||
| media_instance_running_background_jobs: mediaworker | ||||
| ``` | ||||
| 
 | ||||
| The worker `mediaworker` looks like this: | ||||
| 
 | ||||
| ``` | ||||
| worker_app: "synapse.app.media_repository" | ||||
| worker_name: "mediaworker" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/media.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
|   - path: "/run/matrix-synapse/inbound_mediaworker.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [media] | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/replication_mediaworker.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| ``` | ||||
| 
 | ||||
| If you use more than one mediaworker, know that they must all run on the same | ||||
| machine; scaling it over more than one machine will not work. | ||||
| 
 | ||||
| 
 | ||||
| ## Worker logging | ||||
| 
 | ||||
| As stated before, you configure the logging of workers in a separate yaml | ||||
| file. As with the definitions of the workers themselves, you need a directory for | ||||
| that. We'll use `/etc/matrix-synapse/logconf.d` for that; make it and fix the | ||||
| permissions. | ||||
| 
 | ||||
| ``` | ||||
| mkdir /etc/matrix-synapse/logconf.d | ||||
| chgrp matrix-synapse /etc/matrix-synapse/logconf.d | ||||
| chmod 750 /etc/matrix-synapse/logconf.d | ||||
| ``` | ||||
| 
 | ||||
| There's a lot you can configure for logging, but for now we'll give every | ||||
| worker the same layout. Here's the configuration for the `login` worker: | ||||
| 
 | ||||
| ``` | ||||
| version: 1 | ||||
| formatters: | ||||
|   precise: | ||||
|     format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' | ||||
| handlers: | ||||
|   file: | ||||
|     class: logging.handlers.TimedRotatingFileHandler | ||||
|     formatter: precise | ||||
|     filename: /var/log/matrix-synapse/login.log | ||||
|     when: midnight | ||||
|     backupCount: 3 | ||||
|     encoding: utf8 | ||||
| 
 | ||||
|   buffer: | ||||
|     class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler | ||||
|     target: file | ||||
|     capacity: 10 | ||||
|     flushLevel: 30 | ||||
|     period: 5 | ||||
| 
 | ||||
| loggers: | ||||
|   synapse.metrics: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.replication.tcp: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.util.caches.lrucache: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   twisted: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse: | ||||
|     level: INFO | ||||
|     handlers: [buffer] | ||||
| 
 | ||||
| root: | ||||
|   level: INFO | ||||
|   handlers: [buffer] | ||||
| ``` | ||||
| 
 | ||||
| The only thing you need to change is the filename to which the logs are | ||||
| written. You could create only one configuration and use that in every worker, | ||||
| but that would mean all logs will end up in the same file, which is probably | ||||
| not what you want. | ||||
| 
 | ||||
| See the [Python | ||||
| documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema) | ||||
| for all the ins and outs of logging. | ||||
| 
 | ||||
| 
 | ||||
| # Systemd | ||||
| 
 | ||||
| You want Synapse and its workers managed by systemd. First of all we define a | ||||
| `target`: a group of services that belong together. | ||||
| 
 | ||||
| ``` | ||||
| systemctl edit --force --full matrix-synapse.target | ||||
| ``` | ||||
| 
 | ||||
| Feed it with this bit: | ||||
| 
 | ||||
| ``` | ||||
| [Unit] | ||||
| Description=Matrix Synapse with all its workers | ||||
| After=network.target | ||||
| 
 | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
| ``` | ||||
| 
 | ||||
| First add `matrix-synapse.service` to this target by overriding the `WantedBy` | ||||
| in the unit file. We're overriding and adding a bit more. | ||||
| 
 | ||||
| ``` | ||||
| systemctl edit matrix-synapse.service | ||||
| ``` | ||||
| 
 | ||||
| Add this to the overrides: | ||||
| 
 | ||||
| ``` | ||||
| [Unit] | ||||
| PartOf=matrix-synapse.target | ||||
| Before=matrix-synapse-worker | ||||
| ReloadPropagatedFrom=matrix-synapse.target | ||||
| 
 | ||||
| [Service] | ||||
| RuntimeDirectory=matrix-synapse | ||||
| RuntimeDirectoryMode=0770 | ||||
| RuntimeDirectoryPreserve=yes | ||||
| 
 | ||||
| [Install] | ||||
| WantedBy=matrix-synapse.target | ||||
| ``` | ||||
| 
 | ||||
| The additions under `Unit` mean that `matrix-synapse.service` is part of the | ||||
| target we created earlier, and that it should start before the workers. | ||||
| Restarting the target means this service must be restarted too. | ||||
| 
 | ||||
| Under `Service` we define the directory where the sockets live (`/run` is | ||||
| prefixed automatically), its permissions and that it should not be removed if | ||||
| the service is stopped. | ||||
| 
 | ||||
| The `WantedBy` under `Install` includes it in the target. The target itself is | ||||
| included in `multi-user.target`, so it should always be started in the multi-user | ||||
| runlevel. | ||||
| 
 | ||||
| For the workers we're using a template instead of separate unit files for every | ||||
| single one. Create the template: | ||||
| 
 | ||||
| ``` | ||||
| systemctl edit --full --force matrix-synapse-worker@ | ||||
| ``` | ||||
| 
 | ||||
| Mind the `@` at the end, that's not a typo. Fill it with this content: | ||||
| 
 | ||||
| ``` | ||||
| [Unit] | ||||
| Description=Synapse worker %i | ||||
| AssertPathExists=/etc/matrix-synapse/workers/%i.yaml | ||||
| 
 | ||||
| # This service should be restarted when the synapse target is restarted. | ||||
| PartOf=matrix-synapse.target | ||||
| ReloadPropagatedFrom=matrix-synapse.target | ||||
| 
 | ||||
| # if this is started at the same time as the main, let the main process start | ||||
| # first, to initialise the database schema. | ||||
| After=matrix-synapse.service | ||||
| 
 | ||||
| [Service] | ||||
| Type=notify | ||||
| NotifyAccess=main | ||||
| User=matrix-synapse | ||||
| Group=matrix-synapse | ||||
| WorkingDirectory=/var/lib/matrix-synapse | ||||
| ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml | ||||
| ExecReload=/bin/kill -HUP $MAINPID | ||||
| Restart=always | ||||
| RestartSec=3 | ||||
| SyslogIdentifier=matrix-synapse-%i | ||||
| 
 | ||||
| [Install] | ||||
| WantedBy=matrix-synapse.target | ||||
| ``` | ||||
| 
 | ||||
| Now you can start/stop/restart every worker individually. Starting the `login` | ||||
| worker would be done by: | ||||
| 
 | ||||
| ``` | ||||
| systemctl start matrix-synapse-worker@login | ||||
| ``` | ||||
| 
 | ||||
| Every worker needs to be enabled and started individually. Quickest way to do | ||||
| that, is to run a loop in the directory: | ||||
| 
 | ||||
| ``` | ||||
| cd /etc/matrix-synapse/workers | ||||
| for worker in `ls *yaml | sed -n 's/\.yaml//p'`; do systemctl enable matrix-synapse-worker@$worker; done | ||||
| ``` | ||||
| 
 | ||||
| After a reboot, Synapse and all its workers should be started. But starting | ||||
| the target should also do that: | ||||
| 
 | ||||
| ``` | ||||
| systemctl start matrix-synapse.target | ||||
| ``` | ||||
| 
 | ||||
| This should start `matrix-synapse.service` first, the main worker. After that | ||||
| all the workers should be started too. Check if the correct sockets appear and | ||||
| if there are any error messages in the logs. | ||||
| 
 | ||||
| 
 | ||||
| # nginx | ||||
| 
 | ||||
| We may have a lot of workers, but if nginx doesn't forward traffic to the | ||||
| correct worker(s), it won't work. We're going to have to change nginx's | ||||
| configuration quite a bit. | ||||
| 
 | ||||
| See [Deploying a Synapse Homeserver with | ||||
| Docker](https://tcpipuk.github.io/synapse/deployment/nginx.html) for the | ||||
| inspiration. This details a Docker installation, which we don't have, but the | ||||
| reasoning behind it applies to our configuration too. | ||||
| 
 | ||||
| Here's [how to configure nginx for use with workers](../../nginx/workers). | ||||
|  |  | |||
|  | @ -1,15 +0,0 @@ | |||
| worker_app: "synapse.app.generic_worker" | ||||
| worker_name: "federation_reader1" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/federation_reader-log.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
|   - path: "/run/matrix-synapse/replication_federation_reader1.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/inbound_federation_reader1.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [federation] | ||||
| 
 | ||||
|  | @ -1,10 +0,0 @@ | |||
| worker_app: "synapse.app.generic_worker" | ||||
| worker_name: "federation_sender1" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/federation_sender-log.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
|   - path: "/run/matrix-synapse/replication_federation_sender1.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| 
 | ||||
|  | @ -1,19 +0,0 @@ | |||
| worker_app: "synapse.app.generic_worker" | ||||
| worker_name: "initial_sync1" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/initial_sync-log.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/inbound_initial_sync1.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/replication_initial_sync1.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| 
 | ||||
|  | @ -1,41 +0,0 @@ | |||
| version: 1 | ||||
| formatters: | ||||
|   precise: | ||||
|     format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' | ||||
| handlers: | ||||
|   file: | ||||
|     class: logging.handlers.TimedRotatingFileHandler | ||||
|     formatter: precise | ||||
|     filename: /var/log/matrix-synapse/login.log | ||||
|     when: midnight | ||||
|     backupCount: 3 | ||||
|     encoding: utf8 | ||||
| 
 | ||||
|   buffer: | ||||
|     class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler | ||||
|     target: file | ||||
|     capacity: 10 | ||||
|     flushLevel: 30 | ||||
|     period: 5 | ||||
| 
 | ||||
| loggers: | ||||
|   synapse.metrics: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.replication.tcp: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.util.caches.lrucache: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   twisted: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse: | ||||
|     level: INFO | ||||
|     handlers: [buffer] | ||||
| 
 | ||||
| root: | ||||
|   level: INFO | ||||
|   handlers: [buffer] | ||||
| 
 | ||||
|  | @ -1,19 +0,0 @@ | |||
| worker_app: "synapse.app.generic_worker" | ||||
| worker_name: "login" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/login-log.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/inbound_login.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: | ||||
|         - client | ||||
|         - consent | ||||
|         - federation | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/replication_login.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| 
 | ||||
|  | @ -1,41 +0,0 @@ | |||
| version: 1 | ||||
| formatters: | ||||
|   precise: | ||||
|     format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' | ||||
| handlers: | ||||
|   file: | ||||
|     class: logging.handlers.TimedRotatingFileHandler | ||||
|     formatter: precise | ||||
|     filename: /var/log/matrix-synapse/media.log | ||||
|     when: midnight | ||||
|     backupCount: 3 | ||||
|     encoding: utf8 | ||||
| 
 | ||||
|   buffer: | ||||
|     class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler | ||||
|     target: file | ||||
|     capacity: 10 | ||||
|     flushLevel: 30 | ||||
|     period: 5 | ||||
| 
 | ||||
| loggers: | ||||
|   synapse.metrics: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.replication.tcp: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse.util.caches.lrucache: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   twisted: | ||||
|     level: WARN | ||||
|     handlers: [buffer] | ||||
|   synapse: | ||||
|     level: INFO | ||||
|     handlers: [buffer] | ||||
| 
 | ||||
| root: | ||||
|   level: INFO | ||||
|   handlers: [buffer] | ||||
| 
 | ||||
|  | @ -1,15 +0,0 @@ | |||
| worker_app: "synapse.app.media_repository" | ||||
| worker_name: "mediaworker" | ||||
| worker_log_config: "/etc/matrix-synapse/logconf.d/media-log.yaml" | ||||
| 
 | ||||
| worker_listeners: | ||||
|   - path: "/run/matrix-synapse/inbound_mediaworker.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [media] | ||||
| 
 | ||||
|   - path: "/run/matrix-synapse/replication_mediaworker.sock" | ||||
|     type: http | ||||
|     resources: | ||||
|       - names: [replication] | ||||
| 
 | ||||
							
								
								
									
										10
									
								
								panel/.envrc
									
										
									
									
									
								
							
							
						
						
									
										10
									
								
								panel/.envrc
									
										
									
									
									
								
							|  | @ -1,10 +0,0 @@ | |||
| #!/usr/bin/env bash | ||||
| # the shebang is ignored, but nice for editors | ||||
| 
 | ||||
| # shellcheck shell=bash | ||||
| if type -P lorri &>/dev/null; then | ||||
|   eval "$(lorri direnv)" | ||||
| else | ||||
|   echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]' | ||||
|   use_nix | ||||
| fi | ||||
							
								
								
									
										13
									
								
								panel/.gitignore
									
										
									
									
										vendored
									
									
								
							
							
						
						
									
										13
									
								
								panel/.gitignore
									
										
									
									
										vendored
									
									
								
							|  | @ -1,13 +0,0 @@ | |||
| # Nix | ||||
| .direnv | ||||
| result* | ||||
| 
 | ||||
| # Python | ||||
| *.pyc | ||||
| __pycache__ | ||||
| 
 | ||||
| # Django, application-specific | ||||
| db.sqlite3 | ||||
| src/db.sqlite3 | ||||
| src/static | ||||
| .credentials | ||||
|  | @ -1,46 +0,0 @@ | |||
| # Fediversity Panel | ||||
| 
 | ||||
| The Fediversity Panel is a web service for managing Fediversity deployments with a graphical user interface, written in Django. | ||||
| 
 | ||||
| ## Development | ||||
| 
 | ||||
| - To obtain all tools related to this project, enter the development environment with `nix-shell`. | ||||
| 
 | ||||
|   If you want to do that automatically on entering this directory: | ||||
| 
 | ||||
|   - [Set up `direnv`](https://github.com/nix-community/nix-direnv#installation) | ||||
|   - Run `direnv allow` in the directory where repository is stored on your machine | ||||
| 
 | ||||
|     > **Note** | ||||
|     > | ||||
|     > This is a security boundary, and allows automatically running code from this repository on your machine. | ||||
| 
 | ||||
| - Run NixOS integration tests and Django unit tests: | ||||
| 
 | ||||
|   ```bash | ||||
|   nix-build -A tests | ||||
|   ``` | ||||
| 
 | ||||
| - List all available Django management commands with: | ||||
| 
 | ||||
|   ```shell-session | ||||
|   manage | ||||
|   ``` | ||||
| 
 | ||||
| - Run the server locally | ||||
| 
 | ||||
|   ```shell-session | ||||
|   manage runserver | ||||
|   ``` | ||||
| 
 | ||||
| - Whenever you add a field in the database schema, run: | ||||
| 
 | ||||
|   ```console | ||||
|   manage makemigrations | ||||
|   ``` | ||||
| 
 | ||||
|   Then before starting the server again, run: | ||||
| 
 | ||||
|   ``` | ||||
|   manage migrate | ||||
|   ``` | ||||
|  | @ -1,53 +0,0 @@ | |||
| { | ||||
|   system ? builtins.currentSystem, | ||||
|   sources ? import ../npins, | ||||
|   pkgs ? import sources.nixpkgs { | ||||
|     inherit system; | ||||
|     config = { }; | ||||
|     overlays = [ ]; | ||||
|   }, | ||||
| }: | ||||
| let | ||||
|   package = | ||||
|     let | ||||
|       callPackage = pkgs.lib.callPackageWith (pkgs // pkgs.python3.pkgs); | ||||
|     in | ||||
|     callPackage ./nix/package.nix { }; | ||||
| 
 | ||||
|   pkgs' = pkgs.extend (_final: _prev: { panel = package; }); | ||||
| 
 | ||||
|   manage = pkgs.writeScriptBin "manage" '' | ||||
|     exec ${pkgs.lib.getExe pkgs.python3} ${toString ./src/manage.py} $@ | ||||
|   ''; | ||||
| in | ||||
| { | ||||
|   shell = pkgs.mkShellNoCC { | ||||
|     inputsFrom = [ package ]; | ||||
|     packages = [ | ||||
|       pkgs.npins | ||||
|       manage | ||||
|     ]; | ||||
|     env = { | ||||
|       NPINS_DIRECTORY = toString ../npins; | ||||
|     }; | ||||
|     shellHook = '' | ||||
|       # in production, secrets are passed via CREDENTIALS_DIRECTORY by systemd. | ||||
|       # use this directory for testing with local secrets | ||||
|       mkdir -p .credentials | ||||
|       echo secret > ${builtins.toString ./.credentials}/SECRET_KEY | ||||
|       export CREDENTIALS_DIRECTORY=${builtins.toString ./.credentials} | ||||
|       export DATABASE_URL="sqlite:///${toString ./src}/db.sqlite3" | ||||
|     ''; | ||||
|   }; | ||||
| 
 | ||||
|   tests = pkgs'.callPackage ./nix/tests.nix { }; | ||||
|   inherit package; | ||||
| 
 | ||||
|   # re-export inputs so they can be overridden granularly | ||||
|   # (they can't be accessed from the outside any other way) | ||||
|   inherit | ||||
|     sources | ||||
|     system | ||||
|     pkgs | ||||
|     ; | ||||
| } | ||||
|  | @ -1,199 +0,0 @@ | |||
| { | ||||
|   config, | ||||
|   pkgs, | ||||
|   lib, | ||||
|   ... | ||||
| }: | ||||
| let | ||||
|   inherit (lib) | ||||
|     concatStringsSep | ||||
|     mapAttrsToList | ||||
|     mkDefault | ||||
|     mkEnableOption | ||||
|     mkIf | ||||
|     mkOption | ||||
|     mkPackageOption | ||||
|     optionalString | ||||
|     types | ||||
|     ; | ||||
|   inherit (pkgs) writeShellApplication; | ||||
| 
 | ||||
|   # TODO: configure the name globally for everywhere it's used | ||||
|   name = "panel"; | ||||
| 
 | ||||
|   cfg = config.services.${name}; | ||||
| 
 | ||||
|   database-url = "sqlite:////var/lib/${name}/db.sqlite3"; | ||||
| 
 | ||||
|   python-environment = pkgs.python3.withPackages ( | ||||
|     ps: with ps; [ | ||||
|       cfg.package | ||||
|       uvicorn | ||||
|     ] | ||||
|   ); | ||||
| 
 | ||||
|   configFile = pkgs.concatText "configuration.py" [ | ||||
|     ((pkgs.formats.pythonVars { }).generate "settings.py" cfg.settings) | ||||
|     (builtins.toFile "extra-settings.py" cfg.extra-settings) | ||||
|   ]; | ||||
| 
 | ||||
|   manage-service = writeShellApplication { | ||||
|     name = "manage"; | ||||
|     text = ''exec ${cfg.package}/bin/manage.py "$@"''; | ||||
|   }; | ||||
| 
 | ||||
|   manage-admin = writeShellApplication { | ||||
|     # This allows running the `manage` command in the system environment, e.g. to initialise an admin user | ||||
|     # Executing | ||||
|     name = "manage"; | ||||
|     text = | ||||
|       '' | ||||
|         systemd-run --pty \ | ||||
|           --same-dir \ | ||||
|           --wait \ | ||||
|           --collect \ | ||||
|           --service-type=exec \ | ||||
|           --unit "manage-${name}.service" \ | ||||
|           --property "User=${name}" \ | ||||
|           --property "Group=${name}" \ | ||||
|           --property "Environment=DATABASE_URL=${database-url} USER_SETTINGS_FILE=${configFile}" \ | ||||
|       '' | ||||
|       + optionalString (credentials != [ ]) ( | ||||
|         (concatStringsSep " \\\n" (map (cred: "--property 'LoadCredential=${cred}'") credentials)) + " \\\n" | ||||
|       ) | ||||
|       + '' | ||||
|         ${lib.getExe manage-service} "$@" | ||||
|       ''; | ||||
|   }; | ||||
| 
 | ||||
|   credentials = mapAttrsToList (name: secretPath: "${name}:${secretPath}") cfg.secrets; | ||||
| in | ||||
| # TODO: for a more clever and generic way of running Django services: | ||||
| #       https://git.dgnum.eu/mdebray/djangonix/ | ||||
| #       unlicensed at the time of writing, but surely worth taking some inspiration from... | ||||
| { | ||||
|   options.services.${name} = { | ||||
|     enable = mkEnableOption "Service configuration for `${name}`"; | ||||
|     # NOTE: this requires that the package is present in `pkgs` | ||||
|     package = mkPackageOption pkgs name { }; | ||||
|     production = mkOption { | ||||
|       type = types.bool; | ||||
|       default = true; | ||||
|     }; | ||||
|     restart = mkOption { | ||||
|       description = "systemd restart behavior"; | ||||
|       type = types.enum [ | ||||
|         "no" | ||||
|         "on-success" | ||||
|         "on-failure" | ||||
|         "on-abnormal" | ||||
|         "on-abort" | ||||
|         "always" | ||||
|       ]; | ||||
|       default = "always"; | ||||
|     }; | ||||
|     domain = mkOption { type = types.str; }; | ||||
|     host = mkOption { | ||||
|       type = types.str; | ||||
|       default = "127.0.0.1"; | ||||
|     }; | ||||
|     port = mkOption { | ||||
|       type = types.port; | ||||
|       default = 8000; | ||||
|     }; | ||||
|     settings = mkOption { | ||||
|       type = types.attrsOf types.anything; | ||||
|       default = { | ||||
|         STATIC_ROOT = mkDefault "/var/lib/${name}/static"; | ||||
|         DEBUG = mkDefault false; | ||||
|         ALLOWED_HOSTS = mkDefault [ | ||||
|           cfg.domain | ||||
|           cfg.host | ||||
|           "localhost" | ||||
|           "[::1]" | ||||
|         ]; | ||||
|         CSRF_TRUSTED_ORIGINS = mkDefault [ "https://${cfg.domain}" ]; | ||||
|         COMPRESS_OFFLINE = true; | ||||
|         LIBSASS_OUTPUT_STYLE = "compressed"; | ||||
|       }; | ||||
|       description = '' | ||||
|         Django configuration as an attribute set. | ||||
|         Name-value pairs will be converted to Python variable assignments. | ||||
|       ''; | ||||
|     }; | ||||
|     extra-settings = mkOption { | ||||
|       type = types.lines; | ||||
|       default = ""; | ||||
|       description = '' | ||||
|         Django configuration written in Python verbatim. | ||||
|         Contents will be appended to the definitions in `settings`. | ||||
|       ''; | ||||
|     }; | ||||
|     secrets = mkOption { | ||||
|       type = types.attrsOf types.path; | ||||
|       default = { }; | ||||
|     }; | ||||
|   }; | ||||
| 
 | ||||
  # Wire the Django app into the system: nginx front-end, a dedicated user,
  # and a systemd unit running uvicorn.  Only active when the module is
  # enabled.
  config = mkIf cfg.enable {
    # Expose the admin management wrapper on the system PATH.
    environment.systemPackages = [ manage-admin ];

    services = {
      nginx.enable = true;
      nginx.virtualHosts = {
        ${cfg.domain} =
          {
            locations = {
              # Proxy application traffic to the local ASGI server.
              "/".proxyPass = "http://localhost:${toString cfg.port}";
              # Serve collected static files straight from disk, bypassing
              # the application (populated by preStart below).
              "/static/".alias = "/var/lib/${name}/static/";
            };
          }
          # Only request ACME certificates / force HTTPS on production
          # deployments.
          // lib.optionalAttrs cfg.production {
            enableACME = true;
            forceSSL = true;
          };
      };
    };

    # Dedicated unprivileged system user and group for the service.
    users.users.${name} = {
      isSystemUser = true;
      group = name;
    };

    users.groups.${name} = { };
    systemd.services.${name} = {
      description = "${name} ASGI server";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      # `manage` (from manage-service) must be on PATH for preStart.
      path = [
        python-environment
        manage-service
      ];
      preStart = ''
        # Auto-migrate on first run or if the package has changed
        versionFile="/var/lib/${name}/package-version"
        if [[ $(cat "$versionFile" 2>/dev/null) != ${cfg.package} ]]; then
          manage migrate --no-input
          manage collectstatic --no-input --clear
          manage compress --force
          echo ${cfg.package} > "$versionFile"
        fi
      '';
      script = ''
        uvicorn ${name}.asgi:application --host ${cfg.host} --port ${toString cfg.port}
      '';
      serviceConfig = {
        # NOTE(review): hardcodes "always" even though a restart-policy
        # option (enum of no/on-success/.../always) appears to be declared
        # in this module's options -- confirm whether cfg.<option> should
        # be used here instead.
        Restart = "always";
        User = name;
        WorkingDirectory = "/var/lib/${name}";
        # systemd creates and owns /var/lib/<name>, /run/<name> and
        # /var/log/<name> for the unit.
        StateDirectory = name;
        RuntimeDirectory = name;
        LogsDirectory = name;
      } // lib.optionalAttrs (credentials != [ ]) { LoadCredential = credentials; };
      environment = {
        # Point Django at the generated settings file and the database;
        # `configFile` and `database-url` are let-bindings defined earlier
        # in this file (not shown in this excerpt).
        USER_SETTINGS_FILE = "${configFile}";
        DATABASE_URL = database-url;
      };
    };
  };
| } | ||||
|  | @ -1,57 +0,0 @@ | |||
| { | ||||
|   lib, | ||||
|   buildPythonPackage, | ||||
|   setuptools, | ||||
|   django_4, | ||||
|   django-compressor, | ||||
|   django-libsass, | ||||
|   dj-database-url, | ||||
| }: | ||||
| let | ||||
|   src = | ||||
|     with lib.fileset; | ||||
|     toSource { | ||||
|       root = ../src; | ||||
|       fileset = intersection (gitTracked ../../.) ../src; | ||||
|     }; | ||||
|   pyproject = with lib; fromTOML pyproject-toml; | ||||
|   # TODO: define this globally | ||||
|   name = "panel"; | ||||
|   # TODO: we may want this in a file so it's easier to read statically | ||||
|   version = "0.0.0"; | ||||
|   pyproject-toml = '' | ||||
|     [project] | ||||
|     name = "Fediversity-Panel" | ||||
|     version = "${version}" | ||||
| 
 | ||||
|     [tool.setuptools] | ||||
|     packages = [ "${name}" ] | ||||
|     include-package-data = true | ||||
|   ''; | ||||
| in | ||||
| buildPythonPackage { | ||||
|   pname = name; | ||||
|   inherit (pyproject.project) version; | ||||
|   pyproject = true; | ||||
|   inherit src; | ||||
| 
 | ||||
|   preBuild = '' | ||||
|     echo "recursive-include ${name} *" > MANIFEST.in | ||||
|     cp ${builtins.toFile "source" pyproject-toml} pyproject.toml | ||||
|   ''; | ||||
| 
 | ||||
|   propagatedBuildInputs = [ | ||||
|     setuptools | ||||
|     django_4 | ||||
|     django-compressor | ||||
|     django-libsass | ||||
|     dj-database-url | ||||
|   ]; | ||||
| 
 | ||||
|   postInstall = '' | ||||
|     mkdir -p $out/bin | ||||
|     cp -v ${src}/manage.py $out/bin/manage.py | ||||
|     chmod +x $out/bin/manage.py | ||||
|     wrapProgram $out/bin/manage.py --prefix PYTHONPATH : "$PYTHONPATH" | ||||
|   ''; | ||||
| } | ||||
Some files were not shown because too many files have changed in this diff Show more
		Loading…
	
	Add table
		
		Reference in a new issue