Compare commits

...

94 commits

Author SHA1 Message Date
Hans van Zijst
8c7ee15bcc Merge branch 'main' into hans/documentation
All checks were successful
/ check-pre-commit (pull_request) Successful in 20s
2024-11-28 15:54:17 +01:00
f67c012dfe fix relative path computation
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-28 09:45:14 +01:00
0e7eef5ea2
Set up mailing for Forgejo (#37)
All checks were successful
/ check-pre-commit (push) Successful in 17s
2024-11-27 18:12:03 +01:00
89d25fa7a5
Set up mailing for Forgejo
All checks were successful
/ check-pre-commit (pull_request) Successful in 23s
2024-11-27 17:34:05 +01:00
5134bab2d2
Improve on the Mastodon test (#35)
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-27 14:38:27 +01:00
51c3ec754f
Rename the test simply “mastodon”
All checks were successful
/ check-pre-commit (pull_request) Successful in 25s
2024-11-27 12:39:26 +01:00
7c88d47fb8
Notes and cleanup 2024-11-27 12:39:26 +01:00
f4f1ecdf71
Rework and cleanup the Mastodon test 2024-11-27 12:39:26 +01:00
5699ca8ba6
Note on more nginx proxy options for Garage 2024-11-27 12:39:26 +01:00
37aac118ce
Remove useless S3_HOSTNAME environment variable
`S3_HOSTNAME` is only useful for path-style buckets where Mastodon will
use `<S3_HOSTNAME>/<S3_BUCKET>`. However, we use domain-style, and that
is exactly what `S3_ALIAS_HOST` is for
2024-11-27 12:39:26 +01:00
6ef263f53e
Fix typo 2024-11-27 12:39:26 +01:00
6e260b3bdc
Consolidate virtualisation options 2024-11-27 12:39:26 +01:00
a8dcc9f298 set up expression-level tests
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-27 12:36:32 +01:00
22c7c3091f simplify lib export 2024-11-27 12:36:32 +01:00
37590599ad
Basic README file
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
/ check-pre-commit (push) Successful in 6s
2024-11-27 12:23:04 +01:00
80f38ff7bc
Move the Proxmox architecture document to infra/ 2024-11-27 12:20:33 +01:00
746fddcbbb Forgejo: enable Git LFS
All checks were successful
/ check-pre-commit (push) Successful in 21s
2024-11-26 13:57:27 +01:00
243ff8f070
Bump nixpkgs to 24.11
All checks were successful
/ check-pre-commit (push) Successful in 29s
2024-11-26 13:00:00 +01:00
b04b3c457f
Fix typo
All checks were successful
/ check-pre-commit (push) Successful in 23s
2024-11-22 17:37:15 +01:00
da25f9221a
Cleanup and consolidate remaining configurations (#23)
All checks were successful
/ check-pre-commit (push) Successful in 17s
2024-11-21 12:14:22 +01:00
5bc7f954bd
Consolidate config for vm02187
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-21 12:13:34 +01:00
e4c891b284
Consolidate config for vm02186 2024-11-21 12:13:34 +01:00
104827746a
Consolidate config for vm02179 2024-11-21 12:13:34 +01:00
2beb64af83
Consolidate config for vm02116 2024-11-21 12:13:31 +01:00
d2638845d0
Factorise other configuration options (#22)
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-21 12:08:00 +01:00
fca563a987
nix.settings.trusted-users
All checks were successful
/ check-pre-commit (pull_request) Successful in 23s
2024-11-21 12:06:35 +01:00
9f471327df
environment.systemPackages 2024-11-21 12:04:59 +01:00
0749bda96c
networking.useDHCP 2024-11-21 12:04:02 +01:00
9888ae0d07
nixpkgs.hostPlatform 2024-11-21 12:04:02 +01:00
dbba09de45
system.stateVersion 2024-11-21 12:04:00 +01:00
17611b7e53
Timezone and locale 2024-11-21 12:03:12 +01:00
dd56774f34
Factorise hardware configurations (#21)
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-21 11:57:53 +01:00
8a075bb837
Keep vm02116's specificities documented 2024-11-21 11:57:26 +01:00
7c8b26c07c
Factorise hardware config of vm02187 into infra/common 2024-11-21 11:55:02 +01:00
40ae3db164
Factorise hardware config of vm02186 into infra/common 2024-11-21 11:55:02 +01:00
6d0c8caf57
Factorise hardware config of vm02179 into infra/common 2024-11-21 11:55:01 +01:00
ba8c1d9d9c
Move hardware config of vm02116 to infra/common 2024-11-21 11:55:01 +01:00
1e8174799b
Factorise users configurations (#20)
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-21 11:45:19 +01:00
67eddccc40
Apply @kevin's recommendations
All checks were successful
/ check-pre-commit (pull_request) Successful in 21s
- Remove `root`'s password; SSH password authentication is already
  removed for all users.
- Enable password-less sudo for `wheel` group.
- Add a note about removing `root` SSH connection altogether.
- Add `niols` user with sudo capabilities.
2024-11-21 11:44:53 +01:00
4bef70a2ab
Factorise users config of vm02187 into infra/common 2024-11-21 11:44:53 +01:00
6efe45a88b
Factorise users config of vm02186 into infra/common 2024-11-21 11:44:53 +01:00
09764eeab9
Factorise users config of vm02179 into infra/common 2024-11-21 11:44:53 +01:00
6e7e0e5ef7
Move users config of vm02116 to infra/common 2024-11-21 11:44:53 +01:00
9c7b370447
Factorise networking configurations (#19)
All checks were successful
/ check-pre-commit (push) Successful in 23s
2024-11-21 11:33:24 +01:00
60ec9aab2a
Follow @kevin's recommendations
All checks were successful
/ check-pre-commit (pull_request) Successful in 23s
2024-11-21 11:32:48 +01:00
18559dab54
Move nftables ruleset to separate file
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-20 15:58:07 +01:00
f56c00eb59
Factorise networking config of vm02187 into infra/common 2024-11-20 15:58:06 +01:00
fe6d68446b
Factorise networking config of vm02186 into infra/common 2024-11-20 15:56:33 +01:00
c8d9b1c669
Factorise networking config of vm02179 into infra/common 2024-11-20 15:56:15 +01:00
3bc484754f
Move networking config of vm02116 to infra/common 2024-11-20 15:55:49 +01:00
da127445bc
Clean-up VMs configurations 2024-11-20 14:45:20 +01:00
8ad1457763
Enable trimming of trailing whitespace as a pre-commit hook 2024-11-20 13:07:03 +01:00
fe0c69f6d9
Add changes that hadn't been pushed to the repo as well
All checks were successful
/ check-pre-commit (pull_request) Successful in 24s
/ check-pre-commit (push) Successful in 23s
2024-11-20 12:41:13 +01:00
aad7a984c2
Integrate vm02116 (Forgejo) and vm02187 (Wiki) to the infra/ directory and deployments (#11)
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-20 10:15:28 +01:00
62eea1bf8a
Add Wiki machine to web deployment
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-20 10:13:40 +01:00
2ffab40687
Add Forgejo machine to git deployment
This deployment is the old `actions-runners` deployment, renamed.
2024-11-20 10:13:40 +01:00
47bca471da
Add wiki machine to the infra/ directory 2024-11-20 10:13:40 +01:00
c2f820b85d
Add Forgejo machine to the infra/ directory 2024-11-20 10:13:40 +01:00
771708c557 drive-by refactoring
All checks were successful
/ check-pre-commit (push) Successful in 21s
2024-11-20 09:01:33 +01:00
f1c0d29df9
Add niols to the users of the website
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
/ check-pre-commit (push) Successful in 22s
2024-11-18 15:18:38 +01:00
18b03924ad Format and clean dead code in infra/ (#12)
All checks were successful
/ check-pre-commit (push) Successful in 17s
Reviewed-on: Fediversity/Fediversity#12
Co-authored-by: Nicolas “Niols” Jeannerod <nicolas.jeannerod@moduscreate.com>
Co-committed-by: Nicolas “Niols” Jeannerod <nicolas.jeannerod@moduscreate.com>
2024-11-18 12:09:30 +01:00
d8320bc287 Control the actions runners' configuration via NixOps4 (#8)
All checks were successful
/ check-pre-commit (push) Successful in 22s
Reviewed-on: Fediversity/Fediversity#8
2024-11-18 11:39:24 +01:00
e300ff517d
Small description of the infra/ subdirectory
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-18 11:28:02 +01:00
ae90b3e362
Add Valentin's SSH keys
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-18 10:56:38 +01:00
f9f096cff8
Ooops 2024-11-18 10:56:38 +01:00
69cad1592e
Rename “default” NixOps4 deployment 2024-11-18 10:56:38 +01:00
40ec7e9c8c
Make a NixOps4 deployment for action runners 2024-11-18 10:56:38 +01:00
8a53b5242b
Add files related to vm02179 2024-11-18 10:56:38 +01:00
accb4d4c81
Move files related to vm02186 to own directory 2024-11-18 10:56:38 +01:00
fc29873949 Merge pull request 'added the nixos configs of the forgje and wiki servers' (#10) from forgejo_and_wiki_configs into main
All checks were successful
/ check-pre-commit (push) Successful in 17s
Reviewed-on: Fediversity/Fediversity#10
2024-11-18 10:53:14 +01:00
2c5046ab0e added the nixos configs of the forgje and wiki servers
All checks were successful
/ check-pre-commit (pull_request) Successful in 21s
2024-11-18 10:48:16 +01:00
be057fb93b use email address that's being monitored
All checks were successful
/ check-pre-commit (pull_request) Successful in 29s
/ check-pre-commit (push) Successful in 21s
2024-11-18 09:40:41 +01:00
bd478eb32b Improve automated provisioning/removal of Proxmox VMs (#6)
All checks were successful
/ check-pre-commit (push) Successful in 18s
Reviewed-on: Fediversity/Fediversity#6
Reviewed-by: Valentin Gagarin <valentin.gagarin@tweag.io>
2024-11-17 00:09:21 +01:00
3765a7e049
Mention the scripts in the README
All checks were successful
/ check-pre-commit (pull_request) Successful in 22s
2024-11-17 00:07:21 +01:00
94e5356886
Parallelise removal script 2024-11-17 00:07:20 +01:00
74bf29bb75
Parallelise provisioning script 2024-11-17 00:07:20 +01:00
56d125a5b0
Rework and cleanup provisioning script 2024-11-17 00:07:20 +01:00
95389bb615
Remove useless piece of code
Ids must start at 100 because of Proxmox.
2024-11-17 00:07:20 +01:00
1c614ff3b8
Add VM removal script 2024-11-17 00:07:20 +01:00
84ba26d187
Move Proxmox-related things under deployment/proxmox 2024-11-17 00:07:20 +01:00
07fa942989 increase sensitive area of the menu toggle
All checks were successful
/ check-pre-commit (push) Successful in 22s
2024-11-15 09:54:37 +01:00
b78d341d95 simplify menu-toggle label SVG 2024-11-15 09:54:37 +01:00
e61ff7c039 show mobile menu toggle in mode-sensitive color 2024-11-15 09:54:37 +01:00
9803e69e3f Plug services and deployment into the flake (#3)
All checks were successful
/ check-pre-commit (push) Successful in 1m6s
Reviewed-on: Fediversity/Fediversity#3
Reviewed-by: Valentin Gagarin <valentin.gagarin@tweag.io>
2024-11-14 18:09:12 +01:00
435d9c861a
Integrate deployment as a flake part
All checks were successful
/ check-pre-commit (pull_request) Successful in 21s
2024-11-14 18:07:12 +01:00
fc2acc13d8
Integrate services as a flake part 2024-11-14 18:07:11 +01:00
9c08267fce
Clean up what does not belong in services 2024-11-14 18:04:02 +01:00
81ae2df87b Fix typo causing disabled CI (#5)
All checks were successful
/ check-pre-commit (push) Successful in 17s
Reviewed-on: Fediversity/Fediversity#5
Co-authored-by: Nicolas “Niols” Jeannerod <nicolas.jeannerod@moduscreate.com>
Co-committed-by: Nicolas “Niols” Jeannerod <nicolas.jeannerod@moduscreate.com>
2024-11-14 17:55:29 +01:00
7ac8ec85cc Flake environment with pre-commit hook; CI to enforce them. (#2)
Reviewed-on: Fediversity/Fediversity#2
Reviewed-by: Valentin Gagarin <valentin.gagarin@tweag.io>
2024-11-14 17:51:15 +01:00
a888540580
Opt-in to formatting for the services/ subdirectory 2024-11-14 17:50:17 +01:00
4b77808f3f
Basic CI that checks pre-commits 2024-11-14 17:50:17 +01:00
e51fca5f0e
Basic flake with pre-commit hooks 2024-11-14 17:50:17 +01:00
c323453234
Move some gitignore at toplevel 2024-11-14 17:50:03 +01:00
3ae51fa545 deploy website from the repo (#1)
- move the impure single-node deploy helper here

  it's not used anywhere else

- reuse the pins from the website

  this needs to be cleaned up later

- don't copy the config to the server

  it's impure (can't even build that without jumping through hoops), and useless when building via SSH

Reviewed-on: Fediversity/Fediversity#1
2024-11-14 13:41:19 +01:00
63 changed files with 1738 additions and 1443 deletions

View file

@ -0,0 +1,16 @@
# Forgejo Actions workflow: run the flake's pre-commit checks on every
# pull-request update and on pushes to main.
#
# NOTE: the scraped rendering had lost all YAML indentation; structure
# reconstructed to the conventional workflow layout.
on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
  push:
    branches:
      - main

jobs:
  check-pre-commit:
    # "native" runner label: executes directly on the runner host.
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      # Build the flake's pre-commit check; -L streams full build logs.
      - run: nix build .#checks.x86_64-linux.pre-commit -L

View file

@ -1,9 +1,15 @@
.DS_Store
.idea
*.log
tmp/
*.iso
.proxmox
/.pre-commit-config.yaml
nixos.qcow2 nixos.qcow2
result* .envrc
.direnv .direnv
result*
.nixos-test-history .nixos-test-history
*screenshot.png *screenshot.png
output output
todo todo
/.pre-commit-config.yaml

View file

@ -1,2 +1,29 @@
# Fediversity # The Fediversity project
This repository contains all the code and code-related files having to do with
[the Fediversity project](https://fediversity.eu/), with the notable exception
of [NixOps4 that is hosted on GitHub](https://github.com/nixops4/nixops4).
## Content of this repository
Most of the directories in this repository have their own README going into more
details as to what they are for. As an overview:
- [`deployment/`](./deployment) contains bits and pieces having to do with
auto-deployment of test VMs on a private Proxmox.
- [`infra/`](./infra) contains the configurations for the various VMs that are
in production for the project, for instance the Git instances or the Wiki.
- [`matrix/`](./matrix) contains everything having to do with setting up a
fully-featured Matrix server.
- [`server/`](./server) contains the configuration of the VM hosting the
website. This should be integrated into `infra/` shortly in the future, as
tracked in https://git.fediversity.eu/Fediversity/Fediversity/issues/31.
- [`services/`](./services) contains our effort to make Fediverse applications
work seamlessly together in our specific setting.
- [`website/`](./website) contains the framework and the content of [the
Fediversity website](https://fediversity.eu/)

View file

@ -1,8 +0,0 @@
.DS_Store
.idea
*.log
tmp/
*.iso
result
.proxmox
.pre-commit-config.yaml

129
deployment/flake-part.nix Normal file
View file

@ -0,0 +1,129 @@
# Flake part for the deployment/ directory: defines the provisioning NixOS
# configurations, the matching installer ISOs, and the "feditest" NixOps4
# deployment of Fediversity test VMs on the ProcoLix Proxmox.
# NOTE(review): the scrape flattened leading indentation; tokens are kept
# verbatim (Nix is whitespace-insensitive outside of strings).
{ inputs, self, ... }:
let
# All candidate VM identifiers: the integers 100..255 inclusive.
# Ids must start at 100 because of Proxmox (see provisioning notes).
allVmIds = # 100 -- 255
let
allVmIdsFrom = x: if x > 255 then [ ] else [ x ] ++ allVmIdsFrom (x + 1);
in
allVmIdsFrom 100;
# Builder for installer ISOs; implementation lives in ./makeInstaller.nix.
makeInstaller = import ./makeInstaller.nix;
in
{
# One provisioning NixOS configuration per candidate vmid, named
# "fedi<vmid>" (e.g. "fedi100" .. "fedi255").
flake.nixosConfigurations.provisioning =
let
inherit (builtins) map listToAttrs;
# Build the NixOS system for a single vmid; the vmid is injected via
# the `procolix.vmid` option consumed by ./procolixVm.nix.
makeProvisioningConfiguration =
vmid:
inputs.nixpkgs.lib.nixosSystem {
modules = [
{ procolix.vmid = vmid; }
./procolixVm.nix
inputs.disko.nixosModules.default
];
};
in
listToAttrs (
map (vmid: {
name = "fedi${toString vmid}";
value = makeProvisioningConfiguration vmid;
}) allVmIds
);
# One installer ISO per provisioning configuration above; SSH host keys
# are read from ./hostKeys/<vmname>/ so machines keep stable identities.
flake.isoInstallers.provisioning =
let
inherit (builtins) mapAttrs;
in
mapAttrs (
vmname:
makeInstaller {
inherit (inputs) nixpkgs;
hostKeys = {
rsa = {
private = ./hostKeys/${vmname}/ssh_host_rsa_key;
public = ./hostKeys/${vmname}/ssh_host_rsa_key.pub;
};
ed25519 = {
private = ./hostKeys/${vmname}/ssh_host_ed25519_key;
public = ./hostKeys/${vmname}/ssh_host_ed25519_key.pub;
};
};
}
) self.nixosConfigurations.provisioning;
# The "feditest" NixOps4 deployment: one resource per test VM, deployed
# over SSH to 95.215.187.<vmid>.
nixops4Deployments.feditest =
{ providers, ... }:
let
inherit (builtins) readFile;
# Build a NixOps4 resource for one ProcoLix VM. `vmconfig` is an extra
# NixOS module (typically enabling one Fediverse service).
makeProcolixVmResource = vmid: vmconfig: {
type = providers.local.exec;
imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
ssh.opts = "";
# IPv4 address is derived directly from the vmid.
ssh.host = "95.215.187.${toString vmid}";
ssh.hostPublicKey = readFile ./hostKeys/fedi${toString vmid}/ssh_host_ed25519_key.pub;
nixpkgs = inputs.nixpkgs;
nixos.module = {
imports = [
vmconfig
{ procolix.vmid = vmid; }
./procolixVm.nix
inputs.snf.nixosModules.fediversity
inputs.disko.nixosModules.default
];
};
};
in
{
providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local;
resources = {
# fedi100: bare VM, no Fediverse service enabled.
fedi100 = makeProcolixVmResource 100 { };
fedi101 = makeProcolixVmResource 101 {
fediversity = {
enable = true;
domain = "fedi101.abundos.eu";
pixelfed.enable = true;
};
};
fedi102 = makeProcolixVmResource 102 {
fediversity = {
enable = true;
domain = "fedi102.abundos.eu";
mastodon.enable = true;
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
};
fedi103 = makeProcolixVmResource 103 (
{ pkgs, ... }:
{
fediversity = {
enable = true;
domain = "fedi103.abundos.eu";
peertube.enable = true;
# NOTE(review): secret checked into the store; test-only value.
temp.peertubeSecretsFile = pkgs.writeText "secret" ''
574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24
'';
};
}
);
fedi120 = makeProcolixVmResource 120 {
fediversity = {
enable = true;
domain = "fedi120.abundos.eu";
pixelfed.enable = true;
};
};
};
};
}

View file

@ -1,184 +0,0 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
# snf.url = "path:/home/niols/git/fediversity/simple-nixos-fediverse"; #dev
snf.url = "git+https://git.fediversity.eu/fediversity/simple-nixos-fediverse.git";
disko.url = "github:nix-community/disko";
nixops4.url = "github:nixops4/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4/eval";
};
outputs =
inputs@{
self,
flake-parts,
nixpkgs,
snf,
...
}:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.nixops4-nixos.modules.flake.default
inputs.git-hooks.flakeModule
];
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
"x86_64-darwin"
];
perSystem =
{
config,
inputs',
pkgs,
...
}:
{
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks = {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
};
devShells.default = pkgs.mkShell {
packages = [ inputs'.nixops4.packages.default ];
shellHook = config.pre-commit.installationScript;
};
};
flake.vmIdTo03d =
id:
let
sid = toString id;
in
if id >= 0 && id <= 9 then
"00${sid}"
else if id >= 10 && id <= 99 then
"0${sid}"
else
sid;
flake.allVmIds = # 100 -- 255
let
allVmIdsFrom = x: if x > 255 then [ ] else [ x ] ++ allVmIdsFrom (x + 1);
in
allVmIdsFrom 100;
flake.nixosConfigurations.provisioning =
let
inherit (builtins) map listToAttrs;
makeProvisioningConfiguration =
vmid:
nixpkgs.lib.nixosSystem {
modules = [
{ procolix.vmid = vmid; }
./procolixVm.nix
inputs.disko.nixosModules.default
];
};
in
listToAttrs (
map (vmid: {
name = "fedi${self.vmIdTo03d vmid}";
value = makeProvisioningConfiguration vmid;
}) self.allVmIds
);
flake.isoInstallers.provisioning =
let
inherit (builtins) mapAttrs;
in
mapAttrs (
vmname:
snf.mkInstaller {
inherit nixpkgs;
hostKeys = {
rsa = {
private = ./hostKeys/${vmname}/ssh_host_rsa_key;
public = ./hostKeys/${vmname}/ssh_host_rsa_key.pub;
};
ed25519 = {
private = ./hostKeys/${vmname}/ssh_host_ed25519_key;
public = ./hostKeys/${vmname}/ssh_host_ed25519_key.pub;
};
};
}
) self.nixosConfigurations.provisioning;
nixops4Deployments.default =
{ providers, ... }:
let
inherit (builtins) readFile;
makeProcolixVmResource = vmid: vmconfig: {
type = providers.local.exec;
imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
ssh.opts = "";
ssh.host = "95.215.187.${self.vmIdTo03d vmid}";
ssh.hostPublicKey = readFile ./hostKeys/fedi${self.vmIdTo03d vmid}/ssh_host_ed25519_key.pub;
nixpkgs = inputs.nixpkgs;
nixos.module = {
imports = [
vmconfig
{ procolix.vmid = vmid; }
./procolixVm.nix
inputs.snf.nixosModules.fediversity
inputs.disko.nixosModules.default
];
};
};
in
{
providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local;
resources = {
fedi100 = makeProcolixVmResource 100 { };
fedi101 = makeProcolixVmResource 101 {
fediversity = {
enable = true;
domain = "fedi101.abundos.eu";
pixelfed.enable = true;
};
};
fedi102 = makeProcolixVmResource 102 {
fediversity = {
enable = true;
domain = "fedi102.abundos.eu";
mastodon.enable = true;
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
};
fedi103 = makeProcolixVmResource 103 (
{ pkgs, ... }:
{
fediversity = {
enable = true;
domain = "fedi103.abundos.eu";
peertube.enable = true;
temp.peertubeSecretsFile = pkgs.writeText "secret" ''
574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24
'';
};
}
);
};
};
};
}

View file

@ -42,9 +42,7 @@ let
}; };
in in
{ {
imports = [ imports = [ "${nixpkgs}/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix" ];
"${nixpkgs}/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix"
];
nixpkgs.hostPlatform = "x86_64-linux"; nixpkgs.hostPlatform = "x86_64-linux";
services.getty.autologinUser = lib.mkForce "root"; services.getty.autologinUser = lib.mkForce "root";
programs.bash.loginShellInit = nixpkgs.lib.getExe bootstrap; programs.bash.loginShellInit = nixpkgs.lib.getExe bootstrap;

View file

@ -8,18 +8,6 @@
let let
inherit (lib) mkOption; inherit (lib) mkOption;
inherit (lib.types) types; inherit (lib.types) types;
vmIdTo03d =
id:
let
sid = toString id;
in
if id >= 0 && id <= 9 then
"00${sid}"
else if id >= 10 && id <= 99 then
"0${sid}"
else
sid;
in in
{ {
@ -30,7 +18,7 @@ in
vmid = mkOption { vmid = mkOption {
type = types.int; type = types.int;
description = '' description = ''
Identifier of the machine. This is a number between 10 and 255. Identifier of the machine. This is a number between 100 and 255.
''; '';
}; };
}; };
@ -43,7 +31,7 @@ in
services.openssh.enable = true; services.openssh.enable = true;
networking = { networking = {
hostName = "fedi${vmIdTo03d config.procolix.vmid}"; hostName = "fedi${toString config.procolix.vmid}";
domain = "procolix.com"; domain = "procolix.com";
interfaces = { interfaces = {
@ -51,7 +39,7 @@ in
ipv4 = { ipv4 = {
addresses = [ addresses = [
{ {
address = "95.215.187.${vmIdTo03d config.procolix.vmid}"; address = "95.215.187.${toString config.procolix.vmid}";
prefixLength = 24; prefixLength = 24;
} }
]; ];
@ -59,7 +47,7 @@ in
ipv6 = { ipv6 = {
addresses = [ addresses = [
{ {
address = "2a00:51c0:13:1305::${vmIdTo03d config.procolix.vmid}"; address = "2a00:51c0:13:1305::${toString config.procolix.vmid}";
prefixLength = 64; prefixLength = 64;
} }
]; ];

View file

@ -1,223 +0,0 @@
#!/usr/bin/env sh
set -euC
## Proxmox API doc: https://pve.proxmox.com/pve-docs/api-viewer
################################################################################
## Parse arguments
username=
password=
iso=result/iso/installer.iso
sockets=1
cores=1
memory=2048
vmid=
help () {
cat <<EOF
Usage: $0 [OPTION...]
Required:
--username STR Username, with provider (eg. niols@pve)
--password STR Password
--vmid INT Identifier of the VM
If not provided via the command line, username and password will be looked for
in a `.proxmox` file in the current working directory, the username on the
first line, and the password on the second.
Optional:
--iso PATH Installer ISO (default: $iso)
--sockets INT Number of sockets (default: $sockets)
--cores INT Number of cores (default: $cores)
--memory INT Memory (default: $memory)
Others:
-h|-?|--help Show this help and exit
EOF
}
die () { printf "$@"; printf '\n'; help; exit 2; }
while [ $# -gt 0 ]; do
argument=$1
shift
case $argument in
--username) readonly username=$1; shift ;;
--password) readonly password=$1; shift ;;
--vmid) readonly vmid=$1; shift ;;
--iso) iso=$1; shift ;;
--sockets) sockets=$1; shift ;;
--cores) cores=$1; shift ;;
--memory) memory=$1; shift ;;
-h|-\?|--help) help; exit 0 ;;
*) die 'Unknown argument: `%s`.' "$argument" ;;
esac
done
if [ -z "$username" ] || [ -z "$password" ]; then
if [ -f .proxmox ]; then
{ read username; read password; } < .proxmox
else
die 'Required: `--username` and `--password`.\n'
fi
fi
[ -z "$vmid" ] && die 'Required: `--vmid`.\n'
printf 'Configuration:\n'
printf ' username: %s\n' $username
printf ' password: %s\n' $password
printf ' vmid: %s\n' $vmid
readonly iso
readonly sockets
readonly cores
readonly memory
printf ' iso: %s\n' $iso
printf ' sockets: %d\n' $sockets
printf ' cores: %d\n' $cores
printf ' memory: %d\n' $memory
################################################################################
## Getting started
readonly apiurl=https://192.168.51.81:8006/api2/json
## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
node=node051
from_response () { echo "$response" | jq -r "$1"; }
printf 'Authenticating...'
response=$(
http \
--verify no \
POST $apiurl/access/ticket \
"username=$username" \
"password=$password"
)
readonly csrfToken=$(from_response .data.CSRFPreventionToken)
readonly ticket=$(from_response .data.ticket)
printf ' done.\n'
http_ () {
response=$(
http \
--verify no \
"$@" \
"Cookie:PVEAuthCookie=$ticket" \
"CSRFPreventionToken:$csrfToken"
)
}
wait_ () {
upid=$1
while :; do
http_ GET $apiurl/nodes/$node/tasks/$upid/status
status=$(from_response .data.status)
case $status in
running) printf '.'; sleep 1 ;;
stopped) break ;;
*) printf ' unexpected status: `%s`\n' "$status"; exit 2 ;;
esac
done
}
################################################################################
## Upload ISO
if [ -z "$node" ]; then
printf 'Picking random node...'
http_ GET $apiurl/nodes
node=$(from_response .data[].node | sort -R | head -n 1)
printf ' done. Picked `%s`.\n' "$node"
fi
readonly node
absiso=$(cd "$(dirname "$iso")"; pwd)/$(basename "$iso")
readonly isoname=installer-$vmid.iso
printf 'Uploading ISO...'
ln -sf $absiso /tmp/$isoname
http_ --form POST $apiurl/nodes/$node/storage/local/upload \
filename@/tmp/$isoname \
content==iso
rm /tmp/$isoname
wait_ $(from_response .data)
printf ' done.\n'
################################################################################
## Create VM
printf 'Creating VM...'
http_ --form POST $apiurl/nodes/$node/qemu \
\
vmid==$vmid \
name==$(printf 'fedi%03d' $vmid) \
pool==Fediversity \
\
ide2=="local:iso/$isoname,media=cdrom" \
ostype==l26 \
\
bios==ovmf \
efidisk0=='linstor_storage:1,efitype=4m' \
agent==1 \
\
scsihw==virtio-scsi-single \
scsi0=='linstor_storage:32,discard=on,ssd=on,iothread=on' \
\
sockets==$sockets \
cores==$cores \
cpu==x86-64-v2-AES \
numa==1 \
\
memory==$memory \
\
net0=='virtio,bridge=vnet1306'
wait_ $(from_response .data)
printf ' done.\n'
################################################################################
## Install VM
printf 'Installing VM...'
http_ POST $apiurl/nodes/$node/qemu/$vmid/status/start
wait_ $(from_response .data)
while :; do
http_ GET $apiurl/nodes/$node/qemu/$vmid/status/current
status=$(from_response .data.status)
case $status in
running) printf '.'; sleep 1 ;;
stopped) break ;;
*) printf ' unexpected status: `%s`\n' "$status"; exit 2 ;;
esac
done
printf 'done.\n'
################################################################################
## Start VM
printf 'Starting VM...'
http_ --form POST $apiurl/nodes/$node/qemu/$vmid/config \
ide2=='none,media=cdrom' \
net0=='virtio,bridge=vnet1305'
wait_ $(from_response .data)
http_ POST $apiurl/nodes/$node/qemu/$vmid/status/start
wait_ $(from_response .data)
printf 'done.\n'

View file

@ -1,14 +1,22 @@
#+title: Provisioning a Proxmox VM #+title: Provisioning VMs via Proxmox
#+author: Kevin Muller, Hans van Zijst & Nicolas Jeannerod
#+date: <2024-10-25 Fri>
* Fediversity Proxmox * Quick links
- http://192.168.51.81:8006/. - Proxmox API doc :: https://pve.proxmox.com/pve-docs/api-viewer
- It is only accessible via Procolix's VPN; see with Kevin. - Fediversity Proxmox ::
- You will need identifiers. Also see with Kevin. Select “Proxmox VE authentication server”. - http://192.168.51.81:8006/.
- Ignore “You do not have a valid subscription” message. - It is only accessible via Procolix's VPN; see with Kevin.
- You will need identifiers. Also see with Kevin. Select “Proxmox VE authentication server”.
- Ignore “You do not have a valid subscription” message.
* Basic terminology * Basic terminology
- Node :: physical host - Node :: physical host
* Automatically
This directory contains scripts that can automatically provision or remove a
Proxmox VM. For now, they are tied to one node in the Fediversity Proxmox, but
it would not be difficult to make them more generic. Try:
#+begin_src sh
sh provision.sh --help
sh remove.sh --help
#+end_src
* Preparing the machine configuration * Preparing the machine configuration
- It is nicer if the machine is a QEMU guest. On NixOS: - It is nicer if the machine is a QEMU guest. On NixOS:
#+begin_src nix #+begin_src nix
@ -23,46 +31,47 @@
~2a00:51c0:13:1305::XXX~. ~2a00:51c0:13:1305::XXX~.
- Name servers should be ~95.215.185.6~ and ~95.215.185.7~. - Name servers should be ~95.215.185.6~ and ~95.215.185.7~.
- Check [[https://netbox.protagio.org][Netbox]] to see which addresses are free. - Check [[https://netbox.protagio.org][Netbox]] to see which addresses are free.
* Upload your ISO * Manually via the GUI
** Upload your ISO
- Go to Fediversity proxmox. - Go to Fediversity proxmox.
- In the left view, expand under the node that you want and click on “local”. - In the left view, expand under the node that you want and click on “local”.
- Select “ISO Images”, then click “Upload”. - Select “ISO Images”, then click “Upload”.
- Note: You can also download from URL. - Note: You can also download from URL.
- Note: You should click on “local” and not “local-zfs”. - Note: You should click on “local” and not “local-zfs”.
* Creating the VM ** Creating the VM
- Click “Create VM” at the top right corner. - Click “Create VM” at the top right corner.
** General *** General
- Node :: which node will host the VM; has to be the same - Node :: which node will host the VM; has to be the same
- VM ID :: Has to be unique, probably best to use the "xxxx" in "vm0xxxx" (yet to be decided) - VM ID :: Has to be unique, probably best to use the "xxxx" in "vm0xxxx" (yet to be decided)
- Name :: Usually "vm" + 5 digits, e.g. "vm02199" - Name :: Usually "vm" + 5 digits, e.g. "vm02199"
- Resource pool :: Fediversity - Resource pool :: Fediversity
** OS *** OS
- Use CD/DVD disc image file (iso) :: - Use CD/DVD disc image file (iso) ::
- Storage :: local, means storage of the node. - Storage :: local, means storage of the node.
- ISO image :: select the image previously uploaded - ISO image :: select the image previously uploaded
No need to touch anything else No need to touch anything else
*** System
- BIOS :: OVMF (UEFI)
- EFI Storage :: ~linstor_storage~; this is a storage shared by all of the Proxmox machines.
- Pre-Enroll keys :: MUST be unchecked
- Qemu Agent :: check
*** Disks
- Tick “advanced” at the bottom.
- Disk size (GiB) :: 40 (depending on requirements)
- SSD emulation :: check (only visible if “Advanced” is checked)
- Discard :: check, so that blocks of removed data are cleared
*** CPU
- Sockets :: 1 (depending on requirements)
- Cores :: 2 (depending on requirements)
- Enable NUMA :: check
*** Memory
- Memory (MiB) :: choose what you want
- Ballooning Device :: leave checked (only visible if “Advanced” is checked)
*** Network
- Bridge :: ~vnet1306~. This is the provisioning bridge; we will change it later.
- Firewall :: uncheck, we will handle the firewall on the VM itself
*** Confirm
** Install and start the VM
- Start the VM a first time.
- Select the VM in the left panel. You might have to expand the node on which it is hosted.
- Select “Console” and start the VM.
@ -73,18 +82,18 @@ No need to touch anything else
- Double click on the CD/DVD Drive line. Select “Do not use any media” and press OK.
- Double click on Network Device, and change the bridge to ~vnet1305~, the public bridge.
- Start the VM again.
** Remove the VM
- [[Shutdown the VM]].
- On the top right corner, click “More”, then “Remove”.
- Enter the ID of the machine.
- Check “Purge from job configurations”
- Check “Destroy unreferenced disks owned by guest”
- Click “Remove”.
** Move the VM to another node
- Make sure there is no ISO plugged in.
- Click on the VM. Click migrate. Choose target node. Go.
- Since the storage is shared, it should go pretty fast (~1 minute).
** Shutdown the VM
- Find the VM in the left panel.
- At the top right corner appears a “Shutdown” button with a submenu.
- Clicking “Shutdown” sends a signal to shutdown the machine. This might not work if the machine is not listening for that signal.

281
deployment/proxmox/provision.sh Executable file
View file

@ -0,0 +1,281 @@
#!/usr/bin/env sh
set -euC

################################################################################
## Constants

## Base URL of the Proxmox VE JSON API.
readonly apiurl=https://192.168.51.81:8006/api2/json

## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051

## Scratch directory for built ISOs and mkdir-based locks.
readonly tmpdir=/tmp/proxmox-provision-$RANDOM$RANDOM
mkdir "$tmpdir"
## Clean the scratch directory up even when `set -e` aborts the script early;
## previously cleanup only happened on a fully successful run.
trap 'rm -Rf "$tmpdir"' EXIT
################################################################################
## Parse arguments
username=
password=
sockets=1
cores=1
memory=2048
vmids=
help () {
cat <<EOF
Usage: $0 [OPTION...] [ID...]
Authentication options:
--username STR Username, with provider (eg. niols@pve)
--password STR Password
If not provided via the command line, username and password will be looked for
in a '.proxmox' file in the current working directory, the username on the
first line, and the password on the second.
Other options:
--sockets INT Number of sockets (default: $sockets)
--cores INT Number of cores (default: $cores)
--memory INT Memory (default: $memory)
Others:
-h|-?|--help Show this help and exit
EOF
}
## Print a printf-formatted message in red and exit with status 2. Errors now
## go to stderr so they are not swallowed when stdout is captured.
die () { { printf '\033[31m'; printf "$@"; printf '\033[0m\n'; } >&2; exit 2; }
## Like `die`, but also print the usage text; for argument-parsing errors.
die_with_help () { { printf '\033[31m'; printf "$@"; printf '\033[0m\n'; } >&2; help; exit 2; }
while [ $# -gt 0 ]; do
argument=$1
shift
case $argument in
--username) readonly username=$1; shift ;;
--password) readonly password=$1; shift ;;
--sockets) sockets=$1; shift ;;
--cores) cores=$1; shift ;;
--memory) memory=$1; shift ;;
-h|-\?|--help) help; exit 0 ;;
-*) die_with_help 'Unknown argument: `%s`.' "$argument" ;;
*) vmids="$vmids $argument" ;;
esac
done
if [ -z "$username" ] || [ -z "$password" ]; then
if [ -f .proxmox ]; then
{ read username; read password; } < .proxmox
else
die_with_help 'Required: `--username` and `--password`.\n'
fi
fi
readonly sockets
readonly cores
readonly memory
## FIXME: When we figure out how to use other nodes than node051.
# if [ -z "$node" ]; then
# printf 'Picking random node...'
# proxmox GET $apiurl/nodes
# node=$(from_response .data[].node | sort -R | head -n 1)
# printf ' done. Picked `%s`.\n' "$node"
# fi
# readonly node
################################################################################
## Getting started
printf 'Authenticating...'
response=$(
http \
--verify no \
POST $apiurl/access/ticket \
"username=$username" \
"password=$password"
)
readonly ticket=$(echo "$response" | jq -r .data.ticket)
readonly csrfToken=$(echo "$response" | jq -r .data.CSRFPreventionToken)
printf ' done.\n'
## Mutual exclusion between background provisioning jobs. A lock named $1 is
## held while the directory $tmpdir/lock-$1 exists; `mkdir` is atomic, so at
## most one caller can create it, and the others spin until it is released.
acquire_lock () {
  while ! mkdir "$tmpdir/lock-$1" 2>/dev/null; do sleep 1; done
}

## Release the lock named $1 by removing its directory.
release_lock () {
  rmdir "$tmpdir/lock-$1"
}
## Run one authenticated httpie call against the Proxmox API. Extra httpie
## arguments come from "$@"; the session ticket and CSRF token obtained during
## authentication are attached to every request. Calls are serialised through
## the `proxmox` lock so parallel background jobs do not interleave requests.
proxmox () {
  acquire_lock proxmox
  http \
    --form \
    --verify no \
    --ignore-stdin \
    "$@" \
    "Cookie:PVEAuthCookie=$ticket" \
    "CSRFPreventionToken:$csrfToken"
  release_lock proxmox
}
## Synchronous variant for when the `proxmox` function would just respond an
## UPID in the `data` JSON field: issue the request, then poll the node's task
## status endpoint once a second until the task reaches `stopped`. Body runs in
## a subshell so `response`, `upid` and `status` stay local.
## NOTE(review): only the task *status* is checked, never its exit status, so a
## task that failed still counts as finished — confirm this is intended.
proxmox_sync () (
  response=$(proxmox "$@")
  upid=$(echo "$response" | jq -r .data)
  while :; do
    response=$(proxmox GET $apiurl/nodes/$node/tasks/$upid/status)
    status=$(echo "$response" | jq -r .data.status)
    case $status in
      running) sleep 1 ;;
      stopped) break ;;
      *) die 'unexpected status: `%s`' "$status" ;;
    esac
  done
)
################################################################################
## Build ISO
## Build the NixOS installer ISO for VM $1 (flake output
## `isoInstallers.provisioning.fedi$1`) and expose it at
## $tmpdir/installer-fedi$1.iso. Serialised through the `build` lock so only
## one `nix build` runs at a time.
build_iso () {
  acquire_lock build
  printf 'Building ISO for VM %d...\n' $1
  nix build \
    .#isoInstallers.provisioning.fedi$1 \
    --log-format raw --quiet \
    --out-link $tmpdir/installer-fedi$1
  ## The out-link points at a directory; expose the ISO under a flat name.
  ln -sf $tmpdir/installer-fedi$1/iso/installer.iso $tmpdir/installer-fedi$1.iso
  printf 'done building ISO for VM %d.\n' $1
  release_lock build
}
################################################################################
## Upload ISO
## Upload the ISO built for VM $1 to the node's `local` storage. httpie's
## `name@path` form attaches the file itself; `content==iso` is the storage
## content type. Serialised through the `upload` lock.
upload_iso () {
  acquire_lock upload
  printf 'Uploading ISO for VM %d...\n' $1
  proxmox_sync POST $apiurl/nodes/$node/storage/local/upload \
    filename@$tmpdir/installer-fedi$1.iso \
    content==iso
  printf 'done uploading ISO for VM %d.\n' $1
  release_lock upload
}
################################################################################
## Remove ISO
remove_iso () {
printf 'Removing ISO for VM %d... unsupported for now. (FIXME)\n' $1
}
################################################################################
## Create VM
## Create VM $1 on $node through the Proxmox API. The settings mirror the
## manual procedure from the Proxmox org document: OVMF (UEFI) firmware with an
## EFI disk on the shared linstor storage, the freshly uploaded installer ISO
## attached as CD-ROM, a virtio SCSI disk with discard/ssd/iothread, and the
## provisioning bridge `vnet1306` (swapped for the public bridge later by
## `start_vm`). Blank `\` lines only group related httpie fields visually.
create_vm () {
  printf 'Creating VM %d...\n' $1
  proxmox_sync POST $apiurl/nodes/$node/qemu \
    \
    vmid==$1 \
    name=="fedi$1" \
    pool==Fediversity \
    \
    ide2=="local:iso/installer-fedi$1.iso,media=cdrom" \
    ostype==l26 \
    \
    bios==ovmf \
    efidisk0=='linstor_storage:1,efitype=4m' \
    agent==1 \
    \
    scsihw==virtio-scsi-single \
    scsi0=='linstor_storage:32,discard=on,ssd=on,iothread=on' \
    \
    sockets==$sockets \
    cores==$cores \
    cpu==x86-64-v2-AES \
    numa==1 \
    \
    memory==$memory \
    \
    net0=='virtio,bridge=vnet1306'
  printf 'done creating VM %d.\n' $1
}
################################################################################
## Install VM
## Boot VM $1 from its installer ISO and wait for the unattended installer to
## finish, which it signals by powering the VM off. Body runs in a subshell so
## `response` and `status` stay local. Error handling now goes through the
## script's `die` helper instead of an ad-hoc printf+exit, for consistency
## with `proxmox_sync`.
install_vm () (
  printf 'Installing VM %d...\n' $1
  proxmox_sync POST $apiurl/nodes/$node/qemu/$1/status/start
  ## Poll once a second until the installer shuts the machine down.
  while :; do
    response=$(proxmox GET $apiurl/nodes/$node/qemu/$1/status/current)
    status=$(echo "$response" | jq -r .data.status)
    case $status in
      running) sleep 1 ;;
      stopped) break ;;
      *) die 'unexpected status: `%s`' "$status" ;;
    esac
  done
  printf 'done installing VM %d.\n' $1
)
################################################################################
## Start VM
## Final boot of VM $1: detach the installer ISO and move the NIC from the
## provisioning bridge (vnet1306) to the public bridge (vnet1305), then start
## the VM for real.
start_vm () {
  printf 'Starting VM %d...\n' $1
  proxmox_sync POST $apiurl/nodes/$node/qemu/$1/config \
    ide2=='none,media=cdrom' \
    net0=='virtio,bridge=vnet1305'
  proxmox_sync POST $apiurl/nodes/$node/qemu/$1/status/start
  printf 'done starting VM %d.\n' $1
}
################################################################################
## Main loop
printf 'Provisioning VMs%s with:\n' "$vmids"
printf ' sockets: %d\n' $sockets
printf ' cores: %d\n' $cores
printf ' memory: %d\n' $memory
## Full provisioning pipeline for one VM id; invoked once per id, each in a
## background job (build and upload are serialised internally via locks).
provision_vm () {
  build_iso $1
  upload_iso $1
  create_vm $1
  install_vm $1
  start_vm $1
  remove_iso $1
}
for vmid in $vmids; do
provision_vm $vmid &
done
wait
printf 'done provisioning VMs%s.\n' "$vmids"
################################################################################
## Cleanup
rm -Rf $tmpdir

163
deployment/proxmox/remove.sh Executable file
View file

@ -0,0 +1,163 @@
#!/usr/bin/env sh
set -euC

################################################################################
## Constants

## Base URL of the Proxmox VE JSON API.
readonly apiurl=https://192.168.51.81:8006/api2/json

## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051

## Scratch directory for mkdir-based locks. Named `proxmox-remove` (was a
## copy-pasted `proxmox-provision`) so the two scripts' directories are
## distinguishable.
readonly tmpdir=/tmp/proxmox-remove-$RANDOM$RANDOM
mkdir "$tmpdir"
## Clean the scratch directory up even when `set -e` aborts the script early.
trap 'rm -Rf "$tmpdir"' EXIT
################################################################################
## Parse arguments
username=
password=
vmids=
help () {
cat <<EOF
Usage: $0 [OPTION...] [ID...]
Authentication options:
--username STR Username, with provider (eg. niols@pve)
--password STR Password
If not provided via the command line, username and password will be looked for
in a '.proxmox' file in the current working directory, the username on the
first line, and the password on the second.
Others:
-h|-?|--help Show this help and exit
EOF
}
## Print a printf-formatted message in red and exit with status 2. Errors now
## go to stderr so they are not swallowed when stdout is captured.
die () { { printf '\033[31m'; printf "$@"; printf '\033[0m\n'; } >&2; exit 2; }
## Like `die`, but also print the usage text; for argument-parsing errors.
die_with_help () { { printf '\033[31m'; printf "$@"; printf '\033[0m\n'; } >&2; help; exit 2; }
while [ $# -gt 0 ]; do
argument=$1
shift
case $argument in
--username) readonly username=$1; shift ;;
--password) readonly password=$1; shift ;;
-h|-\?|--help) help; exit 0 ;;
-*) die_with_help 'Unknown argument: `%s`.' "$argument" ;;
*) vmids="$vmids $argument" ;;
esac
done
if [ -z "$username" ] || [ -z "$password" ]; then
if [ -f .proxmox ]; then
{ read username; read password; } < .proxmox
else
die_with_help 'Required: `--username` and `--password`.\n'
fi
fi
################################################################################
## Getting started
printf 'Authenticating...'
response=$(
http \
--verify no \
POST $apiurl/access/ticket \
"username=$username" \
"password=$password"
)
readonly ticket=$(echo "$response" | jq -r .data.ticket)
readonly csrfToken=$(echo "$response" | jq -r .data.CSRFPreventionToken)
printf ' done.\n'
acquire_lock () {
until mkdir $tmpdir/lock-$1 2>/dev/null; do sleep 1; done
}
release_lock () {
rmdir $tmpdir/lock-$1
}
proxmox () {
acquire_lock proxmox
http \
--verify no \
--form \
"$@" \
"Cookie:PVEAuthCookie=$ticket" \
"CSRFPreventionToken:$csrfToken"
release_lock proxmox
}
## Synchronous variant for when the `proxmox` function would just respond an
## UPID in the `data` JSON field.
proxmox_sync () (
response=$(proxmox "$@")
upid=$(echo "$response" | jq -r .data)
while :; do
response=$(proxmox GET $apiurl/nodes/$node/tasks/$upid/status)
status=$(echo "$response" | jq -r .data.status)
case $status in
running) sleep 1 ;;
stopped) break ;;
*) die 'unexpected status: `%s`' "$status" ;;
esac
done
)
################################################################################
## Stop VM
## Force-stop VM $1, overruling any pending graceful-shutdown task, and wait
## (via `proxmox_sync`) until the stop task finishes.
stop_vm () {
  printf 'Stopping VM %d...\n' $1
  proxmox_sync POST $apiurl/nodes/$node/qemu/$1/status/stop \
    'overrule-shutdown'==1
  printf 'done stopping VM %d.\n' $1
}
################################################################################
## Delete VM
## Delete VM $1, also destroying its unreferenced disks and purging it from
## job configurations (matches the manual “Remove the VM” checklist).
delete_vm () {
  printf 'Deleting VM %d...\n' $1
  proxmox_sync DELETE $apiurl/nodes/$node/qemu/$1 \
    'destroy-unreferenced-disks'==1 \
    'purge'==1
  printf 'done deleting VM %d.\n' $1
}
################################################################################
## Main loop
printf 'Removing VMs%s...\n' "$vmids"
remove_vm () {
stop_vm $1
delete_vm $1
}
for vmid in $vmids; do
remove_vm $vmid &
done
wait
printf 'done removing VMs%s.\n' "$vmids"
################################################################################
## Cleanup
rm -Rf $tmpdir

View file

@ -39,29 +39,11 @@
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs"
}, },
"locked": { "locked": {
"lastModified": 1727531434, "lastModified": 1731274291,
"narHash": "sha256-b+GBgCWd2N6pkiTkRZaMFOPztPO4IVTaclYPrQl2uLk=", "narHash": "sha256-cZ0QMpv5p2a6WEE+o9uu0a4ma6RzQDOQTbm7PbixWz8=",
"owner": "nix-community", "owner": "nix-community",
"repo": "disko", "repo": "disko",
"rev": "b709e1cc33fcde71c7db43850a55ebe6449d0959", "rev": "486250f404f4a4f4f33f8f669d83ca5f6e6b7dfc",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"disko_2": {
"inputs": {
"nixpkgs": "nixpkgs_6"
},
"locked": {
"lastModified": 1727347829,
"narHash": "sha256-y7cW6TjJKy+tu7efxeWI6lyg4VVx/9whx+OmrhmRShU=",
"owner": "nix-community",
"repo": "disko",
"rev": "1879e48907c14a70302ff5d0539c3b9b6f97feaa",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -541,11 +523,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1725194671, "lastModified": 1730958623,
"narHash": "sha256-tLGCFEFTB5TaOKkpfw3iYT9dnk4awTP/q4w+ROpMfuw=", "narHash": "sha256-JwQZIGSYnRNOgDDoIgqKITrPVil+RMWHsZH1eE1VGN0=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "b833ff01a0d694b910daca6e2ff4a3f26dee478c", "rev": "85f7e662eda4fa3a995556527c87b2524b691933",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -587,21 +569,6 @@
"type": "github" "type": "github"
} }
}, },
"nixpkgs-latest": {
"locked": {
"lastModified": 1727220152,
"narHash": "sha256-6ezRTVBZT25lQkvaPrfJSxYLwqcbNWm6feD/vG1FO0o=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "24959f933187217890b206788a85bfa73ba75949",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": { "nixpkgs-lib": {
"locked": { "locked": {
"lastModified": 1730504152, "lastModified": 1730504152,
@ -736,48 +703,16 @@
}, },
"nixpkgs_5": { "nixpkgs_5": {
"locked": { "locked": {
"lastModified": 1727672256, "lastModified": 1732350895,
"narHash": "sha256-9/79hjQc9+xyH+QxeMcRsA6hDyw6Z9Eo1/oxjvwirLk=", "narHash": "sha256-GcOQbOgmwlsRhpLGSwZJwLbo3pu9ochMETuRSS1xpz4=",
"owner": "nixos", "owner": "nixos",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "1719f27dd95fd4206afb9cec9f415b539978827e", "rev": "0c582677378f2d9ffcb01490af2f2c678dcb29d3",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "nixos", "owner": "nixos",
"ref": "nixos-24.05", "ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_6": {
"locked": {
"lastModified": 1725194671,
"narHash": "sha256-tLGCFEFTB5TaOKkpfw3iYT9dnk4awTP/q4w+ROpMfuw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b833ff01a0d694b910daca6e2ff4a3f26dee478c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_7": {
"locked": {
"lastModified": 1730137230,
"narHash": "sha256-0kW6v0alzWIc/Dc/DoVZ7A9qNScv77bj/zYTKI67HZM=",
"owner": "radvendii",
"repo": "nixpkgs",
"rev": "df815998652a1d00ce7c059a1e5ef7d7c0548c90",
"type": "github"
},
"original": {
"owner": "radvendii",
"ref": "nixos_rebuild_tests",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }
@ -826,23 +761,6 @@
"type": "github" "type": "github"
} }
}, },
"pixelfed": {
"flake": false,
"locked": {
"lastModified": 1719823820,
"narHash": "sha256-CKjqnxp7p2z/13zfp4HQ1OAmaoUtqBKS6HFm6TV8Jwg=",
"owner": "pixelfed",
"repo": "pixelfed",
"rev": "4c245cf429330d01fcb8ebeb9aa8c84a9574a645",
"type": "github"
},
"original": {
"owner": "pixelfed",
"ref": "v0.12.3",
"repo": "pixelfed",
"type": "github"
}
},
"pre-commit-hooks": { "pre-commit-hooks": {
"inputs": { "inputs": {
"flake-compat": [ "flake-compat": [
@ -1003,8 +921,7 @@
"git-hooks": "git-hooks", "git-hooks": "git-hooks",
"nixops4": "nixops4", "nixops4": "nixops4",
"nixops4-nixos": "nixops4-nixos", "nixops4-nixos": "nixops4-nixos",
"nixpkgs": "nixpkgs_5", "nixpkgs": "nixpkgs_5"
"snf": "snf"
} }
}, },
"rust-overlay": { "rust-overlay": {
@ -1087,27 +1004,6 @@
"type": "github" "type": "github"
} }
}, },
"snf": {
"inputs": {
"disko": "disko_2",
"nixpkgs": "nixpkgs_7",
"nixpkgs-latest": "nixpkgs-latest",
"pixelfed": "pixelfed"
},
"locked": {
"lastModified": 1731341458,
"narHash": "sha256-n6OJFaUtqRgzu5pFsk3di2AadSpudWjF5QXIcUKgu4c=",
"ref": "refs/heads/main",
"rev": "49473c43c85e167e5ef0b1deccdfb40664774ec5",
"revCount": 104,
"type": "git",
"url": "https://git.fediversity.eu/fediversity/simple-nixos-fediverse.git"
},
"original": {
"type": "git",
"url": "https://git.fediversity.eu/fediversity/simple-nixos-fediverse.git"
}
},
"treefmt": { "treefmt": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [

78
flake.nix Normal file
View file

@ -0,0 +1,78 @@
{
  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
    flake-parts.url = "github:hercules-ci/flake-parts";
    git-hooks.url = "github:cachix/git-hooks.nix";
    disko.url = "github:nix-community/disko";
    nixops4.url = "github:nixops4/nixops4";
    ## NOTE(review): this pins the `eval` ref of the same nixops4 repository;
    ## presumably a stop-gap until nixops4-nixos is published — confirm.
    nixops4-nixos.url = "github:nixops4/nixops4/eval";
  };

  outputs =
    inputs@{ flake-parts, ... }:
    flake-parts.lib.mkFlake { inherit inputs; } {
      systems = [
        "x86_64-linux"
        "aarch64-linux"
        "x86_64-darwin"
        "aarch64-darwin"
      ];

      ## Each subdirectory contributes its own flake-parts module.
      imports = [
        inputs.git-hooks.flakeModule
        inputs.nixops4-nixos.modules.flake.default
        ./deployment/flake-part.nix
        ./infra/flake-part.nix
        ./services/flake-part.nix
      ];

      perSystem =
        {
          config,
          pkgs,
          inputs',
          ...
        }:
        {
          formatter = pkgs.nixfmt-rfc-style;

          pre-commit.settings.hooks =
            ## Not everybody might want pre-commit hooks, so we make them
            ## opt-in. Maybe one day we will decide to have them everywhere.
            let
              inherit (builtins) concatStringsSep;
              ## Directories whose .nix files are covered by the hooks, plus
              ## top-level .nix files.
              optin = [
                "deployment"
                "infra"
                "services"
              ];
              files = "^((" + concatStringsSep "|" optin + ")/.*\\.nix|[^/]*\\.nix)$";
            in
            {
              nixfmt-rfc-style = {
                enable = true;
                inherit files;
              };
              deadnix = {
                enable = true;
                inherit files;
              };
              trim-trailing-whitespace = {
                enable = true;
                inherit files;
              };
            };

          ## Dev shell with the Nix LSP and the nixops4 CLI; installs the
          ## pre-commit hooks on entry.
          devShells.default = pkgs.mkShell {
            packages = [
              pkgs.nil
              inputs'.nixops4.packages.default
            ];
            shellHook = config.pre-commit.installationScript;
          };
        };
    };
}

33
infra/README.org Normal file
View file

@ -0,0 +1,33 @@
#+title: Infra
This directory contains the definition of the VMs that host our infrastructure.
Their configuration can be updated via NixOps4. Run
#+begin_src sh
nixops4 deployments list
#+end_src
to see the available deployments. Given a deployment (eg. ~git~), run
#+begin_src sh
nixops4 apply <deployment>
#+end_src
* Deployments
- ~git~ :: Machines hosting our Git infrastructure, eg. Forgejo and its actions
runners
- ~web~ :: Machines hosting our online content, eg. the website or the wiki
* Procolix machines
These machines are hosted on the Procolix Proxmox instance, to which
non-Procolix members of the project do not have access. They host our stable
infrastructure.
| Machine | Description | Deployment |
|---------+------------------------+------------|
| vm02116 | Forgejo | ~git~ |
| vm02179 | Forgejo actions runner | ~git~ |
| vm02186 | Forgejo actions runner | ~git~ |
| vm02187 | Wiki | ~web~ |

37
infra/common/default.nix Normal file
View file

@ -0,0 +1,37 @@
{ lib, pkgs, ... }:
let
  inherit (lib) mkDefault;
in
{
  ## Base configuration shared by all Procolix VMs.
  imports = [
    ./hardware.nix
    ./networking.nix
    ./users.nix
  ];

  time.timeZone = "Europe/Amsterdam";
  i18n.defaultLocale = "en_US.UTF-8";

  system.stateVersion = "24.05"; # do not change

  ## mkDefault so a machine-specific module can still override the platform.
  nixpkgs.hostPlatform = mkDefault "x86_64-linux";

  environment.systemPackages = with pkgs; [
    ## Vim built with the vim-nix plugin and a minimal vimrc.
    (pkgs.vim_configurable.customize {
      name = "vim";
      vimrcConfig.packages.myplugins = with pkgs.vimPlugins; {
        start = [ vim-nix ]; # load plugin on startup
      };
      vimrcConfig.customRC = ''
        " your custom vimrc
        set nocompatible
        set backspace=indent,eol,start
        " Turn on syntax highlighting by default
        syntax on
        " ...
      '';
    })
    wget
    subversion
  ];
}

24
infra/common/hardware.nix Normal file
View file

@ -0,0 +1,24 @@
{ modulesPath, ... }:
{
  ## These machines are QEMU/KVM guests (Proxmox).
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];

  boot = {
    loader = {
      ## UEFI boot via systemd-boot; the VMs run OVMF firmware.
      systemd-boot.enable = true;
      efi.canTouchEfiVariables = true;
    };
    initrd = {
      ## Drivers needed in the initrd to reach the virtio disk and boot media.
      availableKernelModules = [
        "ata_piix"
        "uhci_hcd"
        "virtio_pci"
        "virtio_scsi"
        "sd_mod"
        "sr_mod"
      ];
      kernelModules = [ "dm-snapshot" ];
    };
  };
}

View file

@ -0,0 +1,73 @@
{ config, lib, ... }:
let
  inherit (lib) mkOption mkDefault;
in
{
  options = {
    ## Per-machine identity, set by each VM's own module.
    procolix.vm = {
      name = mkOption { }; # host name, eg. "vm02116"
      ip4 = mkOption { }; # static public IPv4 address
      ip6 = mkOption { }; # static public IPv6 address
    };
  };

  config = {
    services.openssh = {
      enable = true;
      ## Key-based SSH authentication only.
      settings.PasswordAuthentication = false;
    };

    networking = {
      hostName = config.procolix.vm.name;
      domain = "procolix.com";
      ## REVIEW: Do we actually need that, considering that we have static IPs?
      useDHCP = mkDefault true;
      interfaces = {
        eth0 = {
          ipv4 = {
            addresses = [
              {
                address = config.procolix.vm.ip4;
                prefixLength = 24;
              }
            ];
          };
          ipv6 = {
            addresses = [
              {
                address = config.procolix.vm.ip6;
                prefixLength = 64;
              }
            ];
          };
        };
      };
      defaultGateway = {
        address = "185.206.232.1";
        interface = "eth0";
      };
      defaultGateway6 = {
        address = "2a00:51c0:12:1201::1";
        interface = "eth0";
      };
      nameservers = [
        "95.215.185.6"
        "95.215.185.7"
        "2a00:51c0::5fd7:b906"
        "2a00:51c0::5fd7:b907"
      ];
      ## The NixOS firewall module is disabled in favour of a hand-written
      ## nftables ruleset kept next to this file.
      firewall.enable = false;
      nftables = {
        enable = true;
        rulesetFile = ./nftables-ruleset.nft;
      };
    };
  };
}

View file

@ -0,0 +1,70 @@
#!/usr/sbin/nft -f
flush ruleset
########### define useful variables here #####################
define wan = eth0
define ssh_allow = {
83.161.147.127/32, # host801 ipv4
95.215.185.92/32, # host088 ipv4
95.215.185.211/32, # host089 ipv4
95.215.185.34/32, # nagios2 ipv4
95.215.185.235/32, # ansible-hq
}
define snmp_allow = {
95.215.185.31/32, # cacti ipv4
}
define nrpe_allow = {
95.215.185.34/32, # nagios2 ipv4
}
########### here starts the automated bit #####################
table inet filter {
chain input {
type filter hook input priority 0;
policy drop;
# established/related connections
ct state established,related accept
ct state invalid drop
# Limit ping requests.
ip protocol icmp icmp type echo-request limit rate over 10/second burst 50 packets drop
ip6 nexthdr icmpv6 icmpv6 type echo-request limit rate over 10/second burst 50 packets drop
# loopback interface
iifname lo accept
# icmp
ip protocol icmp icmp type { destination-unreachable, echo-reply, echo-request, source-quench, time-exceeded } accept
# Without the nd-* ones ipv6 will not work.
ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, echo-reply, echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert, packet-too-big, parameter-problem, time-exceeded } accept
# open tcp ports: sshd (22)
tcp dport {ssh} accept
# open tcp ports: snmp (161)
ip saddr $snmp_allow udp dport {snmp} accept
# open tcp ports: nrpe (5666)
ip saddr $nrpe_allow tcp dport {nrpe} accept
# open tcp ports: http (80,443)
tcp dport {http,https} accept
}
chain forward {
type filter hook forward priority 0;
}
chain output {
type filter hook output priority 0;
}
}
table ip nat {
chain postrouting {
}
chain prerouting {
}
}

40
infra/common/users.nix Normal file
View file

@ -0,0 +1,40 @@
{
  ## User accounts shared by all Procolix VMs.
  users.users = {
    ## Shared Procolix admin account.
    procolix = {
      isNormalUser = true;
      extraGroups = [ "wheel" ]; # sudo (password-less, see below)
      hashedPassword = "$y$j9T$UH8Dh/poTCCZ3PXk43au6/$iYen8VUEVvv7SIPqteNtTPKktLxny3TbqvjUwhvi.6B";
      openssh.authorizedKeys.keys = [
        "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAotfCIjLoDlHOe+++kVS1xiBPaS8mC5FypgrxDrDVst6SHxMTca2+IScMajzUZajenvNAoZOwIsyAPacT8OHeyFvV5Y7G874Qa+cZVqJxLht9gdXxr1GNabU3RfhhCh272dUeIKIqfgsRsM2HzdnZCMDavS1Yo+f+RhhHhnJIua+NdVFo21vPrpsz+Cd0M1NhojARLajrTHvEXW0KskUnkbfgxT0vL9jeRZxdgMS+a9ZoR5dbzOxQHWfbP8N04Xc+7CweMlvKwlWuAE/xDb5XLNHorfGWFvZuVhptJN8jPaaVS25wsmsF5IbaAuSZfzCtBdFQhIloUhy0L6ZisubHjQ== procolix@sshnode1"
        "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuT3C0f3nyQ7SwUvXcFmEYEgwL+crY6iK0Bhoi9yfn4soz3fhfMKyKSwc/0RIlRnrz3xnkyJiV0vFeU7AC1ixbGCS3T9uc0G1x0Yedd9n2yR8ZJmkdyfjZ5KE4YvqZ3f6UZn5Mtj+7tGmyp+ee+clLSHzsqeyDiX0FIgFmqiiAVJD6qeKPFAHeWz9b2MOXIBIw+fSLOpx0rosCgesOmPc8lgFvo+dMKpSlPkCuGLBPj2ObT4sLjc98NC5z8sNJMu3o5bMbiCDR9JWgx9nKj+NlALwk3Y/nzHSL/DNcnP5vz2zbX2CBKjx6ju0IXh6YKlJJVyMsH9QjwYkgDQVmy8amQ== procolix@sshnode2"
      ];
    };
    niols = {
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY"
      ];
    };
    valentin = {
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOJzgwAYAoMexc1fBJxU08YmsiU9T4Ua8QFeE4/kZNZ5"
      ];
    };
  };

  ## Members of `wheel` can sudo without a password and are trusted by the
  ## Nix daemon (needed for remote deployment).
  security.sudo.wheelNeedsPassword = false;
  nix.settings.trusted-users = [ "@wheel" ];

  ## FIXME: Remove direct root authentication once NixOps4 supports users with
  ## password-less sudo.
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJg5TlS1NGCRZwMjDgBkXeFUXqooqRlM8fJdBAQ4buPg"
  ];
}

75
infra/flake-part.nix Normal file
View file

@ -0,0 +1,75 @@
{ inputs, ... }:
let
  ## Build a NixOps4 resource for one Procolix Proxmox VM. Every machine
  ## follows the same pattern — SSH to a static IP with a pinned host key,
  ## deploying the NixOS module found in the given directory — so the four
  ## previously copy-pasted resource definitions are factored through this
  ## helper. The produced attribute set is identical to the original ones.
  makeResource =
    providers:
    { host, hostPublicKey, module }:
    {
      type = providers.local.exec;
      imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
      ssh = {
        inherit host hostPublicKey;
        opts = "";
      };
      nixpkgs = inputs.nixpkgs;
      nixos.module = {
        imports = [ module ];
      };
    };
in
{
  ## Machines hosting our Git infrastructure: Forgejo and its action runners.
  nixops4Deployments.git =
    { providers, ... }:
    {
      providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local;
      resources = {
        ## Forgejo
        vm02116 = makeResource providers {
          host = "185.206.232.34";
          hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILriawl1za2jbxzelkL5v8KPmcvuj7xVBgwFxuM/zhYr";
          module = ./vm02116;
        };
        ## Forgejo actions runner
        vm02179 = makeResource providers {
          host = "185.206.232.179";
          hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPAsOCOsJ0vNL9fGj0XC25ir8B+k2NlVJzsiVUx+0eWM";
          module = ./vm02179;
        };
        ## Forgejo actions runner
        vm02186 = makeResource providers {
          host = "185.206.232.186";
          hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII6mnBgEeyYE4tzHeFNHVNBV6KR+hAqh3PYSqlh0QViW";
          module = ./vm02186;
        };
      };
    };

  ## Machines hosting our online content, eg. the wiki.
  nixops4Deployments.web =
    { providers, ... }:
    {
      providers.local = inputs.nixops4-nixos.modules.nixops4Provider.local;
      resources = {
        ## Wiki
        vm02187 = makeResource providers {
          host = "185.206.232.187";
          hostPublicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN24ZfdQNklKkIqfMg/+0vqENuDcy6fhT6SfAq01ae83";
          module = ./vm02187;
        };
      };
    };
}

View file

@ -1,24 +0,0 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
snf.url = "git+https://git.fediversity.eu/fediversity/simple-nixos-fediverse.git";
};
outputs = { self, nixpkgs, snf }:
let
vmName = "vm02186";
in {
nixosConfigurations.${vmName} = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./procolix-configuration.nix
./hardware-configuration.nix
./gitea-runner.nix
];
};
isoInstallers.${vmName} = snf.mkInstaller nixpkgs self.nixosConfigurations.${vmName};
};
}

View file

@ -1,37 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
boot.initrd.kernelModules = [ "dm-snapshot" ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/833ac0f9-ad8c-45ae-a9bf-5844e378c44a";
fsType = "ext4";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/B4D5-3AF9";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.ens18.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@ -1,192 +0,0 @@
{ pkgs, ... }:
{
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking = {
hostName = "vm02186";
domain = "procolix.com";
interfaces = {
eth0 = {
ipv4 = {
addresses = [
{
address = "185.206.232.186";
prefixLength = 24;
}
];
};
ipv6 = {
addresses = [
{
address = "2a00:51c0:12:1201::186";
prefixLength = 64;
}
];
};
};
};
defaultGateway = {
address = "185.206.232.1";
interface = "eth0";
};
defaultGateway6 = {
address = "2a00:51c0:12:1201::1";
interface = "eth0";
};
nameservers = [ "95.215.185.6" "95.215.185.7" ];
firewall.enable = false;
## Host firewall: hand-written nftables ruleset (the NixOS firewall module
## is disabled above in favour of this).
##
## NOTE(review): `$ssh_allow` is defined in the ruleset below but never
## referenced — the rule `tcp dport {ssh} accept` admits SSH from any
## address. Other hosts in this repository use
## `ip saddr $ssh_allow tcp dport {ssh} accept`; confirm whether
## world-reachable SSH is intended on this machine.
nftables = {
  enable = true;
  ruleset = ''
    #!/usr/sbin/nft -f
    flush ruleset
    ########### define usefull variables here #####################
    define wan = eth0
    define ssh_allow = {
    83.161.147.127/32, # host801 ipv4
    95.215.185.92/32, # host088 ipv4
    95.215.185.211/32, # host089 ipv4
    95.215.185.34/32, # nagios2 ipv4
    95.215.185.181/32, # ansible.procolix.com
    95.215.185.235/32, # ansible-hq
    }
    define snmp_allow = {
    95.215.185.31/32, # cacti ipv4
    }
    define nrpe_allow = {
    95.215.185.34/32, # nagios2 ipv4
    }
    ########### here starts the automated bit #####################
    table inet filter {
    chain input {
    type filter hook input priority 0;
    policy drop;
    # established/related connections
    ct state established,related accept
    ct state invalid drop
    # Limit ping requests.
    ip protocol icmp icmp type echo-request limit rate over 10/second burst 50 packets drop
    ip6 nexthdr icmpv6 icmpv6 type echo-request limit rate over 10/second burst 50 packets drop
    # loopback interface
    iifname lo accept
    # icmp
    ip protocol icmp icmp type { destination-unreachable, echo-reply, echo-request, source-quench, time-exceeded } accept
    # Without the nd-* ones ipv6 will not work.
    ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, echo-reply, echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert, packet-too-big, parameter-problem, time-exceeded } accept
    # open tcp ports: sshd (22)
    tcp dport {ssh} accept
    # open tcp ports: snmp (161)
    ip saddr $snmp_allow udp dport {snmp} accept
    # open tcp ports: nrpe (5666)
    ip saddr $nrpe_allow tcp dport {nrpe} accept
    # open tcp ports: http (80,443)
    tcp dport {http,https} accept
    }
    chain forward {
    type filter hook forward priority 0;
    }
    chain output {
    type filter hook output priority 0;
    }
    }
    table ip nat {
    chain postrouting {
    }
    chain prerouting {
    }
    }
  '';
};
};
# Set your time zone.
time.timeZone = "Europe/Amsterdam";
# Select internationalisation properties.
i18n.defaultLocale = "en_US.UTF-8";
# Define a user account. Don't forget to set a password with passwd.
users.users.root.hashedPassword = "$y$j9T$WXvLAUqArJJusuC017FCW0$.rfMOeyx/BsClkJFi5hLcynrSk.njWmfiB6Uy.9th3A";
users.users.procolix = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
hashedPassword = "$y$j9T$UH8Dh/poTCCZ3PXk43au6/$iYen8VUEVvv7SIPqteNtTPKktLxny3TbqvjUwhvi.6B";
openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAotfCIjLoDlHOe+++kVS1xiBPaS8mC5FypgrxDrDVst6SHxMTca2+IScMajzUZajenvNAoZOwIsyAPacT8OHeyFvV5Y7G874Qa+cZVqJxLht9gdXxr1GNabU3RfhhCh272dUeIKIqfgsRsM2HzdnZCMDavS1Yo+f+RhhHhnJIua+NdVFo21vPrpsz+Cd0M1NhojARLajrTHvEXW0KskUnkbfgxT0vL9jeRZxdgMS+a9ZoR5dbzOxQHWfbP8N04Xc+7CweMlvKwlWuAE/xDb5XLNHorfGWFvZuVhptJN8jPaaVS25wsmsF5IbaAuSZfzCtBdFQhIloUhy0L6ZisubHjQ== procolix@sshnode1"
"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuT3C0f3nyQ7SwUvXcFmEYEgwL+crY6iK0Bhoi9yfn4soz3fhfMKyKSwc/0RIlRnrz3xnkyJiV0vFeU7AC1ixbGCS3T9uc0G1x0Yedd9n2yR8ZJmkdyfjZ5KE4YvqZ3f6UZn5Mtj+7tGmyp+ee+clLSHzsqeyDiX0FIgFmqiiAVJD6qeKPFAHeWz9b2MOXIBIw+fSLOpx0rosCgesOmPc8lgFvo+dMKpSlPkCuGLBPj2ObT4sLjc98NC5z8sNJMu3o5bMbiCDR9JWgx9nKj+NlALwk3Y/nzHSL/DNcnP5vz2zbX2CBKjx6ju0IXh6YKlJJVyMsH9QjwYkgDQVmy8amQ== procolix@sshnode2"
];
packages = with pkgs; [
];
};
users.users.niols = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY niols@wallace"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBkQXv/VRZLfV0wNN9PHedmKWyAIfpPUCdaznHZNIDkS niols@orianne/fediversity"
];
packages = with pkgs; [
];
};
users.users.valentin = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOJzgwAYAoMexc1fBJxU08YmsiU9T4Ua8QFeE4/kZNZ5"
];
packages = with pkgs; [
];
};
# List packages installed in system profile. To search, run:
# $ nix search wget
environment.systemPackages = with pkgs; [
(pkgs.vim_configurable.customize {
name = "vim";
vimrcConfig.packages.myplugins = with pkgs.vimPlugins; {
start = [ vim-nix ]; # load plugin on startup
};
vimrcConfig.customRC = ''
" your custom vimrc
set nocompatible
set backspace=indent,eol,start
" Turn on syntax highlighting by default
syntax on
" ...
'';
})
wget
];
# List services that you want to enable:
# Enable the OpenSSH daemon.
services.openssh.enable = true;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "24.05"; # Did you read the comment?
}

28
infra/vm02116/default.nix Normal file
View file

@ -0,0 +1,28 @@
## Host configuration for vm02116 — runs the Forgejo forge
## (git.fediversity.eu, see ./forgejo.nix).
{
  imports = [
    ../common
    ./forgejo.nix
  ];

  ## Static addressing for this VM — presumably consumed by ../common to
  ## configure networking; verify against the common module.
  procolix.vm = {
    name = "vm02116";
    ip4 = "185.206.232.34";
    ip6 = "2a00:51c0:12:1201::20";
  };

  ## vm02116 is running on old hardware based on a Xen VM environment, so it
  ## needs these extra options. Once the VM gets moved to a newer node, these
  ## two options can safely be removed.
  boot.initrd.availableKernelModules = [ "xen_blkfront" ];
  services.xe-guest-utilities.enable = true;

  ## Disks are referenced by UUID so device enumeration order is irrelevant.
  fileSystems."/" = {
    device = "/dev/disk/by-uuid/3802a66d-e31a-4650-86f3-b51b11918853";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/2CE2-1173";
    fsType = "vfat";
  };
}

98
infra/vm02116/forgejo.nix Normal file
View file

@ -0,0 +1,98 @@
{ pkgs, ... }:
let
  domain = "git.fediversity.eu";
in
{
  ## Forgejo git forge served at https://git.fediversity.eu, backed by
  ## MariaDB over a local socket and fronted by nginx with ACME certificates.
  services.forgejo = {
    enable = true;
    lfs.enable = true;

    ## All INI-style settings consolidated into a single definition (they
    ## were previously scattered over three separate `settings` attributes,
    ## which the module system merged but which read poorly).
    settings = {
      service = {
        DISABLE_REGISTRATION = true;
        ENABLE_NOTIFY_MAIL = true;
      };
      server = {
        DOMAIN = domain;
        ROOT_URL = "https://${domain}/";
        ## Forgejo only listens on loopback; nginx terminates TLS below.
        HTTP_ADDR = "127.0.0.1";
        LANDING_PAGE = "explore";
      };
      mailer = {
        ENABLED = true;
        PROTOCOL = "smtp+starttls";
        SMTP_ADDR = "mail.protagio.nl";
        SMTP_PORT = "587";
        FROM = "git@fediversity.eu";
        USER = "git@fediversity.eu";
      };
    };

    ## Mailer password is read at runtime from a file outside the Nix store.
    secrets.mailer.PASSWD = "/var/lib/forgejo/data/keys/forgejo-mailpw";

    database = {
      type = "mysql";
      socket = "/run/mysqld/mysqld.sock";
      passwordFile = "/var/lib/forgejo/data/keys/forgejo-dbpassword";
    };
  };

  ## Give the forgejo user access to files owned by the "keys" group.
  users.groups.keys.members = [ "forgejo" ];

  services.mysql = {
    enable = true;
    package = pkgs.mariadb;
    ensureDatabases = [ "forgejo" ];
    ensureUsers = [
      {
        name = "forgejo";
        ensurePermissions = {
          "forgejo.*" = "ALL PRIVILEGES";
        };
      }
    ];
  };

  security.acme = {
    acceptTerms = true;
    defaults.email = "beheer@procolix.com";
  };

  services.nginx = {
    enable = true;
    recommendedTlsSettings = true;
    recommendedOptimisation = true;
    recommendedGzipSettings = true;
    recommendedProxySettings = true;
    ## Allow large pushes and LFS uploads through the proxy.
    clientMaxBodySize = "500m";
    appendHttpConfig = ''
      map $uri $forgejo_access_log {
      default 1;
      /api/actions/runner.v1.RunnerService/FetchTask 0;
      }
      # Add HSTS header with preloading to HTTPS requests.
      # Adding this header to HTTP requests is discouraged
      map $scheme $hsts_header {
      https "max-age=31536000; includeSubdomains; always";
      }
      add_header Strict-Transport-Security $hsts_header;
    '';
    virtualHosts.${domain} = {
      listenAddresses = [
        "185.206.232.34"
        "[2a00:51c0:12:1201::20]"
      ];
      enableACME = true;
      forceSSL = true;
      locations."/" = {
        ## Forgejo's default HTTP port is 3000 on loopback.
        proxyPass = "http://127.0.0.1:3000/";
        extraConfig = ''
          proxy_set_header X-Real-IP $remote_addr;
          #access_log /var/log/nginx/access.log info if=$forgejo_access_log;
        '';
      };
    };
  };
}

26
infra/vm02179/default.nix Normal file
View file

@ -0,0 +1,26 @@
## Host configuration for vm02179 — runs a Forgejo/Gitea Actions runner
## (see ./gitea-runner.nix).
{
  imports = [
    ../common
    ./gitea-runner.nix
  ];

  ## Static addressing for this VM — presumably consumed by ../common to
  ## configure networking; verify against the common module.
  procolix.vm = {
    name = "vm02179";
    ip4 = "185.206.232.179";
    ip6 = "2a00:51c0:12:1201::179";
  };

  ## Disks are referenced by UUID so device enumeration order is irrelevant.
  fileSystems."/" = {
    device = "/dev/disk/by-uuid/119863f8-55cf-4e2f-ac17-27599a63f241";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/D9F4-9BF0";
    fsType = "vfat";
    options = [
      "fmask=0022"
      "dmask=0022"
    ];
  };
}

View file

@ -0,0 +1,43 @@
{ pkgs, ... }:
{
  ## Forgejo Actions runner registered against git.fediversity.eu, with
  ## Docker available for container jobs and a small native toolchain for
  ## host jobs.
  virtualisation.docker.enable = true;

  services.gitea-actions-runner = {
    package = pkgs.forgejo-actions-runner;
    instances.default = {
      enable = true;
      name = "vm02179.procolix.com";
      url = "https://git.fediversity.eu";
      ## SECURITY: the registration token previously lived here as a plain
      ## string committed to the repository (and in infra/vm02179/token.txt).
      ## It must be considered leaked — revoke/rotate it in Forgejo, and read
      ## the replacement from a file outside the Nix store instead.
      ## TODO(review): confirm the on-host path for the token file.
      tokenFile = "/var/lib/gitea-runner/token.env";
      labels = [
        "docker:docker://node:16-bullseye"
        "native:host"
      ];
      hostPackages = with pkgs; [
        bash
        git
        nix
        nodejs
      ];
      settings = {
        log.level = "info";
        runner = {
          file = ".runner";
          ## Up to 8 concurrent jobs; each job may run for at most 3 hours.
          capacity = 8;
          timeout = "3h";
          insecure = false;
          fetch_timeout = "5s";
          fetch_interval = "2s";
        };
      };
    };
  };
  ## The Nix configuration of the system influences the Nix configuration
  ## in the workflow, and our workflows are often flake-based.
  nix.extraOptions = ''
    experimental-features = nix-command flakes
  '';
}

1
infra/vm02179/token.txt Normal file
View file

@ -0,0 +1 @@
MKmFPY4nxfR4zPYHIRLoiJdrrfkGmcRymj0GWOAk

26
infra/vm02186/default.nix Normal file
View file

@ -0,0 +1,26 @@
## Host configuration for vm02186 — runs a Forgejo/Gitea Actions runner
## (see ./gitea-runner.nix).
{
  imports = [
    ../common
    ./gitea-runner.nix
  ];

  ## Static addressing for this VM — presumably consumed by ../common to
  ## configure networking; verify against the common module.
  procolix.vm = {
    name = "vm02186";
    ip4 = "185.206.232.186";
    ip6 = "2a00:51c0:12:1201::186";
  };

  ## Disks are referenced by UUID so device enumeration order is irrelevant.
  fileSystems."/" = {
    device = "/dev/disk/by-uuid/833ac0f9-ad8c-45ae-a9bf-5844e378c44a";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/B4D5-3AF9";
    fsType = "vfat";
    options = [
      "fmask=0022"
      "dmask=0022"
    ];
  };
}

View file

@ -25,8 +25,16 @@
## This runner supports Docker (with a default Ubuntu image) and native ## This runner supports Docker (with a default Ubuntu image) and native
## modes. In native mode, it contains a few default packages. ## modes. In native mode, it contains a few default packages.
labels = ["docker:docker://node:16-bullseye" "native:host"]; labels = [
hostPackages = with pkgs; [ bash git nix nodejs ]; "docker:docker://node:16-bullseye"
"native:host"
];
hostPackages = with pkgs; [
bash
git
nix
nodejs
];
}; };
}; };

26
infra/vm02187/default.nix Normal file
View file

@ -0,0 +1,26 @@
## Host configuration for vm02187 — runs the MediaWiki instance
## (wiki.fediversity.eu, see ./wiki.nix).
{
  imports = [
    ../common
    ./wiki.nix
  ];

  ## Static addressing for this VM — presumably consumed by ../common to
  ## configure networking; verify against the common module.
  procolix.vm = {
    name = "vm02187";
    ip4 = "185.206.232.187";
    ip6 = "2a00:51c0:12:1201::187";
  };

  ## Disks are referenced by UUID so device enumeration order is irrelevant.
  fileSystems."/" = {
    device = "/dev/disk/by-uuid/a46a9c46-e32b-4216-a4aa-8819b2cd0d49";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/6AB5-4FA8";
    fsType = "vfat";
    options = [
      "fmask=0022"
      "dmask=0022"
    ];
  };
}

65
infra/vm02187/wiki.nix Normal file
View file

@ -0,0 +1,65 @@
{ pkgs, ... }:
{
  ## MediaWiki instance at wiki.fediversity.eu, served by nginx with ACME
  ## certificates and an additional HTTP basic-auth gate.

  ## Allow large file uploads through PHP-FPM (matches $wgMaxUploadSize).
  services.phpfpm.pools.mediawiki.phpOptions = ''
    upload_max_filesize = 1024M;
    post_max_size = 1024M;
  '';

  services.mediawiki = {
    enable = true;
    name = "Fediversity Wiki";
    webserver = "nginx";
    nginx.hostName = "wiki.fediversity.eu";
    ## SECURITY(review): `pkgs.writeText` puts this admin password in the
    ## world-readable Nix store. Deploy the file outside the store and
    ## rotate the password.
    passwordFile = pkgs.writeText "password" "eiM9etha8ohmo9Ohphahpesiux0ahda6";
    ## NOTE: the duplicated `$wgGroupPermissions['*']['edit'] = false;`
    ## line has been removed; the Permissions section below is the single
    ## source of truth for access rules.
    extraConfig = ''
      $wgEnableUploads = true;
      $wgFileExtensions = array('png', 'jpg', 'jpeg', 'svg', 'pdf', 'odt', 'ods', 'brd', 'sch', 'JPG', 'PNG', 'JPEG', 'SVG', 'json', 'mkv', 'mp4', 'gif');
      $wgUseImageMagick = true;
      $wgMaxShellMemory = 524288;
      $wgSVGMetadataCutoff = 1024*1024;
      $wgAllowExternalImages = false;
      ## Permissions
      $wgGroupPermissions['*']['edit'] = false;
      $wgGroupPermissions['*']['createaccount'] = false;
      $wgGroupPermissions['*']['autocreateaccount'] = true;
      $wgGroupPermissions['user']['edit'] = true;
      $wgGroupPermissions['user']['createaccount'] = true;
      $wgGroupPermissions['user']['editmyprivateinfo'] = true;
      $wgGroupPermissions['sysop']['interwiki'] = true;
      $wgGroupPermissions['sysop']['editwidgets'] = true;
      # 1 GB ought to be enough for everyone
      $wgUploadSizeWarning = 1024*1024*512;
      $wgMaxUploadSize = 1024*1024*1024;
      $wgHeadScriptCode = <<<'END'
      <link rel=me href="https://mastodon.fediversity.eu/@fediversity">
      END;
    '';
    extensions = {
      VisualEditor = null;
    };
  };

  services.nginx = {
    enable = true;
    virtualHosts."wiki.fediversity.eu" = {
      ## SECURITY(review): basic-auth credential committed in plaintext;
      ## rotate it and load it via `basicAuthFile` from outside the store.
      basicAuth = {
        fediv = "SecretSauce123!";
      };
      forceSSL = true;
      enableACME = true;
    };
  };

  security.acme = {
    acceptTerms = true;
    defaults.email = "systeemmail@procolix.com";
  };

  ## nginx needs group access to the ACME certificate material.
  users.users.nginx.extraGroups = [ "acme" ];
}

15
server/README.md Normal file
View file

@ -0,0 +1,15 @@
# fediversity.eu webserver
This directory contains the configuration for the server hosting <https://fediversity.eu>.
Build the configuration:
```bash
nix-build -A machine
```
Deploy via SSH:
```bash
env SSH_OPTS="..." nix-shell --run deploy-webserver
```

View file

@ -1,4 +1,3 @@
# Edit this configuration file to define what should be installed on # Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page # your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help). # and in the NixOS manual (accessible by running nixos-help).
@ -7,7 +6,8 @@
{ {
imports = imports =
[ # Include the results of the hardware scan. [
# Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
]; ];
@ -32,44 +32,44 @@
forceSSL = true; forceSSL = true;
globalRedirect = "www.fediversity.eu"; globalRedirect = "www.fediversity.eu";
locations."/.well-known/matrix/client" = { locations."/.well-known/matrix/client" = {
extraConfig = '' extraConfig = ''
return 200 '{"m.homeserver": {"base_url": "https://matrix.fediversity.eu", "public_baseurl": "https://matrix.fediversity.eu"}}'; return 200 '{"m.homeserver": {"base_url": "https://matrix.fediversity.eu", "public_baseurl": "https://matrix.fediversity.eu"}}';
default_type application/json; default_type application/json;
add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"; add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"; add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization";
''; '';
}; };
locations."/.well-known/matrix/server" = { locations."/.well-known/matrix/server" = {
extraConfig = '' extraConfig = ''
return 200 '{"m.server": "matrix.fediversity.eu:443"}'; return 200 '{"m.server": "matrix.fediversity.eu:443"}';
default_type application/json; default_type application/json;
add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"; add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"; add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization";
''; '';
}; };
}; };
services.nginx.virtualHosts."www.fediversity.eu" = { services.nginx.virtualHosts."www.fediversity.eu" = {
enableACME = true; enableACME = true;
forceSSL = true; forceSSL = true;
root = "/var/www/www.fediversity.eu/fediversity.eu/public"; root = "${(import ../website { }).build}";
locations."/.well-known/matrix/client" = { locations."/.well-known/matrix/client" = {
extraConfig = '' extraConfig = ''
return 200 '{"m.homeserver": {"base_url": "https://matrix.fediversity.eu", "public_baseurl": "https://matrix.fediversity.eu"}}'; return 200 '{"m.homeserver": {"base_url": "https://matrix.fediversity.eu", "public_baseurl": "https://matrix.fediversity.eu"}}';
default_type application/json; default_type application/json;
add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"; add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"; add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization";
''; '';
}; };
locations."/.well-known/matrix/server" = { locations."/.well-known/matrix/server" = {
extraConfig = '' extraConfig = ''
return 200 '{"m.server": "matrix.fediversity.eu:443"}'; return 200 '{"m.server": "matrix.fediversity.eu:443"}';
default_type application/json; default_type application/json;
add_header Access-Control-Allow-Origin "*"; add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"; add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS";
add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"; add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization";
''; '';
}; };
}; };
@ -80,7 +80,7 @@
certs."oid.foundation".extraDomainNames = [ "www.oid.foundation" ]; certs."oid.foundation".extraDomainNames = [ "www.oid.foundation" ];
}; };
networking = { networking = {
hostName = "vm02117"; hostName = "vm02117";
domain = "procolix.com"; domain = "procolix.com";
interfaces = { interfaces = {
@ -117,9 +117,9 @@
enable = true; enable = true;
ruleset = '' ruleset = ''
#!/usr/sbin/nft -f #!/usr/sbin/nft -f
flush ruleset flush ruleset
########### define usefull variables here ##################### ########### define usefull variables here #####################
define wan = eth0 define wan = eth0
define ssh_allow = { define ssh_allow = {
@ -137,38 +137,38 @@
define nrpe_allow = { define nrpe_allow = {
95.215.185.34/32, # nagios2 ipv4 95.215.185.34/32, # nagios2 ipv4
} }
########### here starts the automated bit ##################### ########### here starts the automated bit #####################
table inet filter { table inet filter {
chain input { chain input {
type filter hook input priority 0; type filter hook input priority 0;
policy drop; policy drop;
# established/related connections # established/related connections
ct state established,related accept ct state established,related accept
ct state invalid drop ct state invalid drop
# Limit ping requests. # Limit ping requests.
ip protocol icmp icmp type echo-request limit rate over 10/second burst 50 packets drop ip protocol icmp icmp type echo-request limit rate over 10/second burst 50 packets drop
ip6 nexthdr icmpv6 icmpv6 type echo-request limit rate over 10/second burst 50 packets drop ip6 nexthdr icmpv6 icmpv6 type echo-request limit rate over 10/second burst 50 packets drop
# loopback interface # loopback interface
iifname lo accept iifname lo accept
# icmp # icmp
ip protocol icmp icmp type { destination-unreachable, echo-reply, echo-request, source-quench, time-exceeded } accept ip protocol icmp icmp type { destination-unreachable, echo-reply, echo-request, source-quench, time-exceeded } accept
# Without the nd-* ones ipv6 will not work. # Without the nd-* ones ipv6 will not work.
ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, echo-reply, echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert, packet-too-big, parameter-problem, time-exceeded } accept ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, echo-reply, echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert, packet-too-big, parameter-problem, time-exceeded } accept
# open tcp ports: sshd (22) # open tcp ports: sshd (22)
ip saddr $ssh_allow tcp dport {ssh} accept ip saddr $ssh_allow tcp dport {ssh} accept
# open tcp ports: snmp (161) # open tcp ports: snmp (161)
ip saddr $snmp_allow udp dport {snmp} accept ip saddr $snmp_allow udp dport {snmp} accept
# open tcp ports: nrpe (5666) # open tcp ports: nrpe (5666)
ip saddr $nrpe_allow tcp dport {nrpe} accept ip saddr $nrpe_allow tcp dport {nrpe} accept
# open tcp ports: http (80,443) # open tcp ports: http (80,443)
tcp dport {http,https} accept tcp dport {http,https} accept
} }
@ -179,13 +179,13 @@
type filter hook output priority 0; type filter hook output priority 0;
} }
} }
table ip nat { table ip nat {
chain postrouting { chain postrouting {
} }
chain prerouting { chain prerouting {
} }
} }
''; '';
}; };
}; };
@ -197,13 +197,14 @@
# Select internationalisation properties. # Select internationalisation properties.
i18n.defaultLocale = "en_US.UTF-8"; i18n.defaultLocale = "en_US.UTF-8";
security.sudo.wheelNeedsPassword = false;
# Define a user account. Don't forget to set a password with passwd. # Define a user account. Don't forget to set a password with passwd.
users.users.procolix = { users.users.procolix = {
isNormalUser = true; isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user. extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [ openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAotfCIjLoDlHOe+++kVS1xiBPaS8mC5FypgrxDrDVst6SHxMTca2+IScMajzUZajenvNAoZOwIsyAPacT8OHeyFvV5Y7G874Qa+cZVqJxLht9gdXxr1GNabU3RfhhCh272dUeIKIqfgsRsM2HzdnZCMDavS1Yo+f+RhhHhnJIua+NdVFo21vPrpsz+Cd0M1NhojARLajrTHvEXW0KskUnkbfgxT0vL9jeRZxdgMS+a9ZoR5dbzOxQHWfbP8N04Xc+7CweMlvKwlWuAE/xDb5XLNHorfGWFvZuVhptJN8jPaaVS25wsmsF5IbaAuSZfzCtBdFQhIloUhy0L6ZisubHjQ== procolix@sshnode1" "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAotfCIjLoDlHOe+++kVS1xiBPaS8mC5FypgrxDrDVst6SHxMTca2+IScMajzUZajenvNAoZOwIsyAPacT8OHeyFvV5Y7G874Qa+cZVqJxLht9gdXxr1GNabU3RfhhCh272dUeIKIqfgsRsM2HzdnZCMDavS1Yo+f+RhhHhnJIua+NdVFo21vPrpsz+Cd0M1NhojARLajrTHvEXW0KskUnkbfgxT0vL9jeRZxdgMS+a9ZoR5dbzOxQHWfbP8N04Xc+7CweMlvKwlWuAE/xDb5XLNHorfGWFvZuVhptJN8jPaaVS25wsmsF5IbaAuSZfzCtBdFQhIloUhy0L6ZisubHjQ== procolix@sshnode1"
"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuT3C0f3nyQ7SwUvXcFmEYEgwL+crY6iK0Bhoi9yfn4soz3fhfMKyKSwc/0RIlRnrz3xnkyJiV0vFeU7AC1ixbGCS3T9uc0G1x0Yedd9n2yR8ZJmkdyfjZ5KE4YvqZ3f6UZn5Mtj+7tGmyp+ee+clLSHzsqeyDiX0FIgFmqiiAVJD6qeKPFAHeWz9b2MOXIBIw+fSLOpx0rosCgesOmPc8lgFvo+dMKpSlPkCuGLBPj2ObT4sLjc98NC5z8sNJMu3o5bMbiCDR9JWgx9nKj+NlALwk3Y/nzHSL/DNcnP5vz2zbX2CBKjx6ju0IXh6YKlJJVyMsH9QjwYkgDQVmy8amQ== procolix@sshnode2" "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuT3C0f3nyQ7SwUvXcFmEYEgwL+crY6iK0Bhoi9yfn4soz3fhfMKyKSwc/0RIlRnrz3xnkyJiV0vFeU7AC1ixbGCS3T9uc0G1x0Yedd9n2yR8ZJmkdyfjZ5KE4YvqZ3f6UZn5Mtj+7tGmyp+ee+clLSHzsqeyDiX0FIgFmqiiAVJD6qeKPFAHeWz9b2MOXIBIw+fSLOpx0rosCgesOmPc8lgFvo+dMKpSlPkCuGLBPj2ObT4sLjc98NC5z8sNJMu3o5bMbiCDR9JWgx9nKj+NlALwk3Y/nzHSL/DNcnP5vz2zbX2CBKjx6ju0IXh6YKlJJVyMsH9QjwYkgDQVmy8amQ== procolix@sshnode2"
]; ];
packages = with pkgs; [ packages = with pkgs; [
]; ];
@ -212,7 +213,7 @@
isNormalUser = true; isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user. extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [ openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBbK4ZB0Xnpf8yyK4QOI2HvjgQINI3GKi7/O2VEsYXUb laurenshof@Laurenss-MacBook-Air.local" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBbK4ZB0Xnpf8yyK4QOI2HvjgQINI3GKi7/O2VEsYXUb laurenshof@Laurenss-MacBook-Air.local"
]; ];
packages = with pkgs; [ packages = with pkgs; [
]; ];
@ -221,11 +222,18 @@
isNormalUser = true; isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user. extraGroups = [ "wheel" ]; # Enable sudo for the user.
openssh.authorizedKeys.keys = [ openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJg5TlS1NGCRZwMjDgBkXeFUXqooqRlM8fJdBAQ4buPg" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJg5TlS1NGCRZwMjDgBkXeFUXqooqRlM8fJdBAQ4buPg"
]; ];
packages = with pkgs; [ packages = with pkgs; [
]; ];
}; };
users.users.niols = {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY"
];
};
# List packages installed in system profile. To search, run: # List packages installed in system profile. To search, run:
# $ nix search wget # $ nix search wget
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
@ -245,24 +253,17 @@
}) })
wget wget
git git
hugo
go
nodejs
]; ];
# List services that you want to enable: # List services that you want to enable:
# Enable the OpenSSH daemon. # Enable the OpenSSH daemon.
services.openssh.enable = true; services.openssh.enable = true;
services.openssh.settings.PasswordAuthentication = false;
# Enable xe-guest-utilities # Enable xe-guest-utilities
services.xe-guest-utilities.enable = true; services.xe-guest-utilities.enable = true;
# Copy the NixOS configuration file and link it from the resulting system
# (/run/current-system/configuration.nix). This is useful in case you
# accidentally delete configuration.nix.
system.copySystemConfiguration = true;
# This value determines the NixOS release from which the default # This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions # settings for stateful data, like file locations and database versions
# on your system were taken. Its perfectly fine and recommended to leave # on your system were taken. Its perfectly fine and recommended to leave
@ -272,4 +273,3 @@
system.stateVersion = "23.11"; # Did you read the comment? system.stateVersion = "23.11"; # Did you read the comment?
} }

46
server/default.nix Normal file
View file

@ -0,0 +1,46 @@
## Build and deployment entry points for the fediversity.eu webserver
## (see the README.md next to this file). Provides:
##   - `machine`: the NixOS system closure to deploy
##   - `shell`:   a dev shell with the `deploy-webserver` script
{ sources ? import ../website/npins
, system ? builtins.currentSystem
, pkgs ? import sources.nixpkgs {
    inherit system;
    config = { };
    overlays = [ ];
  }
, lib ? import "${sources.nixpkgs}/lib"
}:
let
  # TODO: don't hard code target hosts; wire all of it up with NixOps4
  host = "vm02117.procolix.com";

  ## Shell script that builds the system closure on the remote Nix store
  ## and then activates it over SSH.
  deploy = pkgs.writeShellApplication {
    name = "deploy-webserver";
    text = ''
      # HACK: decouple system evaluation from shell evaluation
      # the structured way for using this hack is encoded in https://github.com/fricklerhandwerk/lazy-drv
      result="$(nix-build ${toString ./.} -A machine --no-out-link --eval-store auto --store ssh-ng://${host})"
      # shellcheck disable=SC2087
      ssh ${host} << EOF
      sudo nix-env -p /nix/var/nix/profiles/system --set "$result"
      sudo "$result"/bin/switch-to-configuration switch
      EOF
    '';
  };

  ## Evaluate a NixOS configuration file with nixpkgs' eval-config.nix.
  nixos-configuration = config:
    import "${pkgs.path}/nixos/lib/eval-config.nix" {
      modules = [
        config
      ];
      system = null;
    };
in
rec {
  ## Full NixOS evaluation of ./configuration.nix.
  nixos = nixos-configuration ./configuration.nix;
  ## The system "toplevel" closure — what actually gets deployed.
  machine = nixos.config.system.build.toplevel;
  ## Dev shell exposing the deploy script; entered via ./shell.nix.
  shell = pkgs.mkShellNoCC {
    packages = with pkgs; [
      deploy
    ];
    env = {
      # TODO: reusing other pins for now; wire up the whole repo to use the same dependencies
      NPINS_DIRECTORY = toString ../website/npins;
    };
  };
}

1
server/shell.nix Normal file
View file

@ -0,0 +1 @@
## Entry point for `nix-shell`: exposes the dev shell (with the
## `deploy-webserver` script) defined in ./default.nix.
(import ./. { }).shell

View file

@ -1 +0,0 @@
use flake

View file

@ -40,7 +40,7 @@ NOTE: it sometimes takes a while for the services to start up, and in the meanti
``` ```
- Creating other accounts has to be enabled via the admin interface. `Administration > Configuration > Basic > Enable Signup` or just add an account directly from `Administration > Create user`. But functionality can also be tested from the root account. - Creating other accounts has to be enabled via the admin interface. `Administration > Configuration > Basic > Enable Signup` or just add an account directly from `Administration > Create user`. But functionality can also be tested from the root account.
- Pixelfed: <http://pixelfed.localhost:8000> - Pixelfed: through the reverse proxy at <http://pixelfed.localhost:8080>
- Account creation via the web interface won't work until we figure out email - Account creation via the web interface won't work until we figure out email
- For now, they can be created on the VM command line - For now, they can be created on the VM command line
```bash ```bash
@ -57,16 +57,6 @@ nix build .#installers.peertube
Upload the image in `./result` to Proxmox when creating a VM. Upload the image in `./result` to Proxmox when creating a VM.
Booting the image will format the disk and install NixOS with the desired configuration. Booting the image will format the disk and install NixOS with the desired configuration.
# Deploying an updated machine configuration
> TODO: There is currently no way to specify an actual target machine by name.
Assuming you have SSH configuration with access to the remote `root` user stored for a machine called e.g. `peertube`, deploy the configuration by the same name:
```bash
nix run .#deploy.peertube
```
## debugging notes ## debugging notes
- it is sometimes useful to `cat result/bin/run-nixos-vm` to see what's really going on (e.g. which ports are getting forwarded) - it is sometimes useful to `cat result/bin/run-nixos-vm` to see what's really going on (e.g. which ports are getting forwarded)

View file

@ -1,13 +0,0 @@
{ writeShellApplication }:
name: _config:
writeShellApplication {
name = "deploy";
text = ''
result="$(nix build --print-out-paths ${./.}#nixosConfigurations#${name} --eval-store auto --store ssh-ng://${name})"
# shellcheck disable=SC2087
ssh ${name} << EOF
nix-env -p /nix/var/nix/profiles/system --set "$result"
"$result"/bin/switch-to-configuration switch
EOF
'';
}

View file

@ -1,36 +0,0 @@
{ ... }:
{
disko.devices.disk.main = {
device = "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions = {
MBR = {
priority = 0;
size = "1M";
type = "EF02";
};
ESP = {
priority = 1;
size = "500M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
priority = 2;
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
}

View file

@ -141,12 +141,8 @@ in
types.submodule { types.submodule {
# TODO: these should be managed as secrets, not in the nix store # TODO: these should be managed as secrets, not in the nix store
options = { options = {
id = mkOption { id = mkOption { type = types.str; };
type = types.str; secret = mkOption { type = types.str; };
};
secret = mkOption {
type = types.str;
};
# TODO: assert at least one of these is true # TODO: assert at least one of these is true
# NOTE: this currently needs to be done at the top level module # NOTE: this currently needs to be done at the top level module
ensureAccess = mkOption { ensureAccess = mkOption {
@ -184,9 +180,7 @@ in
pkgs.awscli pkgs.awscli
]; ];
networking.firewall.allowedTCPPorts = [ networking.firewall.allowedTCPPorts = [ fedicfg.rpc.port ];
fedicfg.rpc.port
];
services.garage = { services.garage = {
enable = true; enable = true;
package = pkgs.garage_0_9; package = pkgs.garage_0_9;
@ -222,6 +216,10 @@ in
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Disable buffering to a temporary file. # Disable buffering to a temporary file.
proxy_max_temp_file_size 0; proxy_max_temp_file_size 0;
## NOTE: This page suggests many more options for the object storage
## proxy. We should take a look.
## https://docs.joinmastodon.org/admin/optional/object-storage-proxy/
''; '';
}; };
}; };

View file

@ -5,11 +5,7 @@ let
}; };
in in
{ { config, lib, ... }:
config,
lib,
...
}:
lib.mkIf (config.fediversity.enable && config.fediversity.mastodon.enable) { lib.mkIf (config.fediversity.enable && config.fediversity.mastodon.enable) {
#### garage setup #### garage setup
@ -50,9 +46,7 @@ lib.mkIf (config.fediversity.enable && config.fediversity.mastodon.enable) {
AWS_ACCESS_KEY_ID = snakeoil_key.id; AWS_ACCESS_KEY_ID = snakeoil_key.id;
AWS_SECRET_ACCESS_KEY = snakeoil_key.secret; AWS_SECRET_ACCESS_KEY = snakeoil_key.secret;
S3_PROTOCOL = "http"; S3_PROTOCOL = "http";
S3_HOSTNAME = config.fediversity.internal.garage.web.rootDomain; S3_ALIAS_HOST = "${S3_BUCKET}.${config.fediversity.internal.garage.web.rootDomain}";
# by default it tries to use "<S3_HOSTNAME>/<S3_BUCKET>"
S3_ALIAS_HOST = "${S3_BUCKET}.${S3_HOSTNAME}";
# SEE: the last section in https://docs.joinmastodon.org/admin/optional/object-storage/ # SEE: the last section in https://docs.joinmastodon.org/admin/optional/object-storage/
# TODO: can we set up ACLs with garage? # TODO: can we set up ACLs with garage?
S3_PERMISSION = ""; S3_PERMISSION = "";
@ -82,9 +76,6 @@ lib.mkIf (config.fediversity.enable && config.fediversity.mastodon.enable) {
fromAddress = "noreply@${config.fediversity.internal.mastodon.domain}"; fromAddress = "noreply@${config.fediversity.internal.mastodon.domain}";
createLocally = false; createLocally = false;
}; };
# TODO: this is hardware-dependent. let's figure it out when we have hardware
# streamingProcesses = 1;
}; };
security.acme = { security.acme = {

View file

@ -5,11 +5,7 @@ let
}; };
in in
{ { config, lib, ... }:
config,
lib,
...
}:
lib.mkIf (config.fediversity.enable && config.fediversity.peertube.enable) { lib.mkIf (config.fediversity.enable && config.fediversity.peertube.enable) {
networking.firewall.allowedTCPPorts = [ networking.firewall.allowedTCPPorts = [

14
services/flake-part.nix Normal file
View file

@ -0,0 +1,14 @@
{ self, ... }:
let
  ## Instantiate one of the NixOS tests under ./tests with the flake and
  ## the per-system package set.
  mkCheck = pkgs: testName: import (./tests + "/${testName}.nix") { inherit self pkgs; };
in
{
  ## Expose the Fediversity service modules as a flake-level NixOS module.
  flake.nixosModules.fediversity = import ./fediversity;

  perSystem =
    { pkgs, ... }:
    {
      ## Integration tests, run via `nix flake check`.
      checks.mastodon = mkCheck pkgs "mastodon";
      checks.pixelfed-garage = mkCheck pkgs "pixelfed-garage";
    };
}

187
services/flake.lock generated
View file

@ -1,187 +0,0 @@
{
"nodes": {
"disko": {
"inputs": {
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1727347829,
"narHash": "sha256-y7cW6TjJKy+tu7efxeWI6lyg4VVx/9whx+OmrhmRShU=",
"owner": "nix-community",
"repo": "disko",
"rev": "1879e48907c14a70302ff5d0539c3b9b6f97feaa",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs_2",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1730814269,
"narHash": "sha256-fWPHyhYE6xvMI1eGY3pwBTq85wcy1YXqdzTZF+06nOg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "d70155fdc00df4628446352fc58adc640cd705c2",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1725194671,
"narHash": "sha256-tLGCFEFTB5TaOKkpfw3iYT9dnk4awTP/q4w+ROpMfuw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b833ff01a0d694b910daca6e2ff4a3f26dee478c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-latest": {
"locked": {
"lastModified": 1727220152,
"narHash": "sha256-6ezRTVBZT25lQkvaPrfJSxYLwqcbNWm6feD/vG1FO0o=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "24959f933187217890b206788a85bfa73ba75949",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1730741070,
"narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1730768919,
"narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1730137230,
"narHash": "sha256-0kW6v0alzWIc/Dc/DoVZ7A9qNScv77bj/zYTKI67HZM=",
"owner": "radvendii",
"repo": "nixpkgs",
"rev": "df815998652a1d00ce7c059a1e5ef7d7c0548c90",
"type": "github"
},
"original": {
"owner": "radvendii",
"ref": "nixos_rebuild_tests",
"repo": "nixpkgs",
"type": "github"
}
},
"pixelfed": {
"flake": false,
"locked": {
"lastModified": 1719823820,
"narHash": "sha256-CKjqnxp7p2z/13zfp4HQ1OAmaoUtqBKS6HFm6TV8Jwg=",
"owner": "pixelfed",
"repo": "pixelfed",
"rev": "4c245cf429330d01fcb8ebeb9aa8c84a9574a645",
"type": "github"
},
"original": {
"owner": "pixelfed",
"ref": "v0.12.3",
"repo": "pixelfed",
"type": "github"
}
},
"root": {
"inputs": {
"disko": "disko",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_3",
"nixpkgs-latest": "nixpkgs-latest",
"pixelfed": "pixelfed"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,143 +0,0 @@
{
inputs = {
nixpkgs.url = "github:radvendii/nixpkgs/nixos_rebuild_tests";
nixpkgs-latest.url = "github:nixos/nixpkgs";
git-hooks.url = "github:cachix/git-hooks.nix";
pixelfed = {
url = "github:pixelfed/pixelfed?ref=v0.12.3";
flake = false;
};
disko.url = "github:nix-community/disko";
};
outputs =
{
self,
nixpkgs,
nixpkgs-latest,
git-hooks,
pixelfed,
disko,
}:
let
system = "x86_64-linux";
lib = nixpkgs.lib;
pkgs = nixpkgs.legacyPackages.${system};
pkgsLatest = nixpkgs-latest.legacyPackages.${system};
bleedingFediverseOverlay = (
_: _: {
pixelfed = pkgsLatest.pixelfed.overrideAttrs (old: {
src = pixelfed;
patches = (old.patches or [ ]) ++ [ ./fediversity/pixelfed-group-permissions.patch ];
});
## TODO: give mastodon, peertube the same treatment
}
);
in
{
nixosModules = {
## Bleeding-edge fediverse packages
bleedingFediverse = {
nixpkgs.overlays = [ bleedingFediverseOverlay ];
};
## Fediversity modules
fediversity = import ./fediversity;
## VM-specific modules
interactive-vm = import ./vm/interactive-vm.nix;
garage-vm = import ./vm/garage-vm.nix;
mastodon-vm = import ./vm/mastodon-vm.nix;
peertube-vm = import ./vm/peertube-vm.nix;
pixelfed-vm = import ./vm/pixelfed-vm.nix;
disk-layout = import ./disk-layout.nix;
};
nixosConfigurations = {
mastodon = nixpkgs.lib.nixosSystem {
inherit system;
modules = with self.nixosModules; [
disko.nixosModules.default
disk-layout
bleedingFediverse
fediversity
interactive-vm
garage-vm
mastodon-vm
];
};
peertube = nixpkgs.lib.nixosSystem {
inherit system;
modules = with self.nixosModules; [
disko.nixosModules.default
disk-layout
bleedingFediverse
fediversity
interactive-vm
garage-vm
peertube-vm
];
};
pixelfed = nixpkgs.lib.nixosSystem {
inherit system;
modules = with self.nixosModules; [
disko.nixosModules.default
disk-layout
bleedingFediverse
fediversity
interactive-vm
garage-vm
pixelfed-vm
];
};
all = nixpkgs.lib.nixosSystem {
inherit system;
modules = with self.nixosModules; [
disko.nixosModules.default
disk-layout
bleedingFediverse
fediversity
interactive-vm
garage-vm
peertube-vm
pixelfed-vm
mastodon-vm
];
};
};
## Fully-feature ISO installer
mkInstaller = import ./installer.nix;
installers = lib.mapAttrs (_: config: self.mkInstaller nixpkgs config) self.nixosConfigurations;
deploy =
let
deployCommand = (pkgs.callPackage ./deploy.nix { });
in
lib.mapAttrs (name: config: deployCommand name config) self.nixosConfigurations;
checks.${system} = {
mastodon-garage = import ./tests/mastodon-garage.nix { inherit pkgs self; };
pixelfed-garage = import ./tests/pixelfed-garage.nix { inherit pkgs self; };
pre-commit = git-hooks.lib.${system}.run {
src = ./.;
hooks = {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
};
};
};
devShells.${system}.default = pkgs.mkShell {
inputs = with pkgs; [
nil
];
shellHook = self.checks.${system}.pre-commit.shellHook;
};
};
}

View file

@ -1,23 +1,28 @@
## This file is a basic test of Mastodon functionalities.
##
## NOTE: This test will fail for Mastodon < 4.3 because of
## https://github.com/mastodon/mastodon/issues/31145
{ pkgs, self }: { pkgs, self }:
let let
lib = pkgs.lib; lib = pkgs.lib;
## FIXME: this binding was not used, but maybe we want a side-effect or something? ## FIXME: this binding was not used, but maybe we want a side-effect or something?
# rebuildableTest = import ./rebuildableTest.nix pkgs; # rebuildableTest = import ./rebuildableTest.nix pkgs;
testImage = pkgs.copyPathToStore ./green.png;
testImageColour = "#00FF00";
seleniumScript = seleniumScript =
pkgs.writers.writePython3Bin "selenium-script" pkgs.writers.writePython3Bin "selenium-script"
{ { libraries = with pkgs.python3Packages; [ selenium ]; }
libraries = with pkgs.python3Packages; [ selenium ];
}
'' ''
from selenium import webdriver from selenium import webdriver
from selenium.webdriver.common.by import By from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.ui import WebDriverWait
print(1)
options = Options() options = Options()
options.add_argument("--headless") options.add_argument("--headless")
# devtools don't show up in headless screenshots # devtools don't show up in headless screenshots
@ -25,7 +30,7 @@ let
service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501 service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
driver = webdriver.Firefox(options=options, service=service) driver = webdriver.Firefox(options=options, service=service)
driver.get("http://mastodon.localhost:55001/public/local") driver.get("http://mastodon.localhost/public/local")
# wait until the statuses load # wait until the statuses load
WebDriverWait(driver, 90).until( WebDriverWait(driver, 90).until(
@ -36,6 +41,7 @@ let
driver.close() driver.close()
''; '';
in in
pkgs.nixosTest { pkgs.nixosTest {
name = "test-mastodon-garage"; name = "test-mastodon-garage";
@ -45,10 +51,10 @@ pkgs.nixosTest {
{ {
virtualisation.memorySize = lib.mkVMOverride 4096; virtualisation.memorySize = lib.mkVMOverride 4096;
imports = with self.nixosModules; [ imports = with self.nixosModules; [
bleedingFediverse
fediversity fediversity
garage-vm ../vm/garage-vm.nix
mastodon-vm ../vm/mastodon-vm.nix
../vm/interactive-vm.nix
]; ];
# TODO: pair down # TODO: pair down
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
@ -60,9 +66,9 @@ pkgs.nixosTest {
seleniumScript seleniumScript
helix helix
imagemagick imagemagick
expect
]; ];
environment.variables = { environment.variables = {
POST_MEDIA = ./green.png;
AWS_ACCESS_KEY_ID = config.services.garage.ensureKeys.mastodon.id; AWS_ACCESS_KEY_ID = config.services.garage.ensureKeys.mastodon.id;
AWS_SECRET_ACCESS_KEY = config.services.garage.ensureKeys.mastodon.secret; AWS_SECRET_ACCESS_KEY = config.services.garage.ensureKeys.mastodon.secret;
}; };
@ -90,64 +96,67 @@ pkgs.nixosTest {
if password_match is None: if password_match is None:
raise Exception(f"account creation did not generate a password.\n{account_creation_output}") raise Exception(f"account creation did not generate a password.\n{account_creation_output}")
password = password_match.group(1) password = password_match.group(1)
# print(f"Test user (test@test.com)'s password is: {password}")
with subtest("TTY Login"):
server.wait_until_tty_matches("1", "login: ")
server.send_chars("root\n");
with subtest("Log in with toot"): with subtest("Log in with toot"):
# toot doesn't provide a way to just specify our login details as arguments, so we have to pretend we're typing them in at the prompt # toot doesn't provide a way to just specify our login details as
server.send_chars("toot login_cli --instance http://mastodon.localhost:55001 --email test@test.com\n") # arguments, so we have to pretend we're typing them in at the prompt;
server.wait_until_tty_matches("1", "Password: ") # we use 'expect' for this purpose.
server.send_chars(password + "\n") server.succeed(f"""
server.wait_until_tty_matches("1", "Successfully logged in.") expect -c '
spawn toot login_cli --instance http://mastodon.localhost:55001 --email test@test.com
expect "Password: "
send "{password}\\n"
interact
' >&2
""")
with subtest("post text"): with subtest("Post a text"):
server.succeed("echo 'hello mastodon' | toot post") server.succeed("echo 'hello mastodon' | toot post")
with subtest("post image"): with subtest("Post an image"):
server.succeed("toot post --media $POST_MEDIA") server.succeed("toot post --media ${testImage}")
with subtest("access garage"): with subtest("Access garage"):
server.succeed("mc alias set garage ${nodes.server.fediversity.internal.garage.api.url} --api s3v4 --path off $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY") server.succeed("mc alias set garage ${nodes.server.fediversity.internal.garage.api.url} --api s3v4 --path off $AWS_ACCESS_KEY_ID $AWS_SECRET_ACCESS_KEY")
server.succeed("mc ls garage/mastodon") server.succeed("mc ls garage/mastodon")
with subtest("access image in garage"): with subtest("Access image in garage"):
image = server.succeed("mc find garage --regex original") image = server.succeed("mc find garage --regex original")
image = image.rstrip() image = image.rstrip()
if image == "": if image == "":
raise Exception("image posted to mastodon did not get stored in garage") raise Exception("image posted to mastodon did not get stored in garage")
server.succeed(f"mc cat {image} >/garage-image.webp") server.succeed(f"mc cat {image} >/garage-image.webp")
garage_image_hash = server.succeed("identify -quiet -format '%#' /garage-image.webp") garage_image_hash = server.succeed("identify -quiet -format '%#' /garage-image.webp")
image_hash = server.succeed("identify -quiet -format '%#' $POST_MEDIA") image_hash = server.succeed("identify -quiet -format '%#' ${testImage}")
if garage_image_hash != image_hash: if garage_image_hash != image_hash:
raise Exception("image stored in garage did not match image uploaded") raise Exception("image stored in garage did not match image uploaded")
with subtest("Content security policy allows garage images"): with subtest("Content-Security-Policy allows garage content"):
headers = server.succeed("xh -h http://mastodon.localhost:55001/public/local") headers = server.succeed("xh -h http://mastodon.localhost:55001/public/local")
csp_match = None csp_match = None
# I can't figure out re.MULTILINE # I can't figure out re.MULTILINE
for header in headers.split("\n"): for header in headers.split("\n"):
csp_match = re.match('^Content-Security-Policy: (.*)$', header) csp_match = re.match('^Content-Security-Policy: (.*)$', header)
if csp_match is not None: if csp_match is not None:
break break
if csp_match is None: if csp_match is None:
raise Exception("mastodon did not send a content security policy header") raise Exception("mastodon did not send a content security policy header")
csp = csp_match.group(1) csp = csp_match.group(1)
# the img-src content security policy should include the garage server # the connect-src content security policy should include the garage server
## TODO: use `nodes.server.fediversity.internal.garage.api.url` same as above, but beware of escaping the regex. Be careful with port 80 though. ## TODO: use `nodes.server.fediversity.internal.garage.api.url` same as above, but beware of escaping the regex. Be careful with port 80 though.
garage_csp = re.match(".*; img-src[^;]*web\.garage\.localhost.*", csp) garage_csp = re.match(".*; img-src[^;]*web\.garage\.localhost.*", csp)
if garage_csp is None: if garage_csp is None:
raise Exception("Mastodon's content security policy does not include garage server. image will not be displayed properly on mastodon.") raise Exception("Mastodon's Content-Security-Policy does not include Garage.")
# this could in theory give a false positive if mastodon changes it's colorscheme to include pure green. # this could in theory give a false positive if mastodon changes it's colorscheme to include ${testImageColour}.
with subtest("image displays"): with subtest("Image displays"):
server.succeed("selenium-script") server.succeed("selenium-script")
server.copy_from_vm("/mastodon-screenshot.png", "") server.copy_from_vm("/mastodon-screenshot.png", "")
displayed_colors = server.succeed("convert /mastodon-screenshot.png -define histogram:unique-colors=true -format %c histogram:info:") displayed_colors = server.succeed("convert /mastodon-screenshot.png -define histogram:unique-colors=true -format %c histogram:info:")
# check that the green image displayed somewhere # check that the image displayed somewhere
green_check = re.match(".*#00FF00.*", displayed_colors, re.S) image_check = re.match(".*${testImageColour}.*", displayed_colors, re.S)
if green_check is None: if image_check is None:
raise Exception("cannot detect the uploaded image on mastodon page.") raise Exception("cannot detect the uploaded image on mastodon page.")
''; '';
} }

View file

@ -54,9 +54,7 @@ let
seleniumScriptPostPicture = seleniumScriptPostPicture =
pkgs.writers.writePython3Bin "selenium-script-post-picture" pkgs.writers.writePython3Bin "selenium-script-post-picture"
{ { libraries = with pkgs.python3Packages; [ selenium ]; }
libraries = with pkgs.python3Packages; [ selenium ];
}
'' ''
import os import os
import time import time
@ -99,9 +97,7 @@ let
seleniumScriptGetSrc = seleniumScriptGetSrc =
pkgs.writers.writePython3Bin "selenium-script-get-src" pkgs.writers.writePython3Bin "selenium-script-get-src"
{ { libraries = with pkgs.python3Packages; [ selenium ]; }
libraries = with pkgs.python3Packages; [ selenium ];
}
'' ''
${seleniumImports} ${seleniumImports}
${seleniumSetup} ${seleniumSetup}
@ -147,10 +143,9 @@ pkgs.nixosTest {
cores = 8; cores = 8;
}; };
imports = with self.nixosModules; [ imports = with self.nixosModules; [
bleedingFediverse
fediversity fediversity
garage-vm ../vm/garage-vm.nix
pixelfed-vm ../vm/pixelfed-vm.nix
]; ];
# TODO: pair down # TODO: pair down
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
@ -191,7 +186,7 @@ pkgs.nixosTest {
server.succeed("pixelfed-manage user:create --name=test --username=test --email=${email} --password=${password} --confirm_email=1") server.succeed("pixelfed-manage user:create --name=test --username=test --email=${email} --password=${password} --confirm_email=1")
# NOTE: This could in theory give a false positive if pixelfed changes it's # NOTE: This could in theory give a false positive if pixelfed changes it's
# colorscheme to include pure green. (see same problem in pixelfed-garage.nix). # colorscheme to include pure green. (see same problem in mastodon-garage.nix).
# TODO: For instance: post a red image and check that the green pixel IS NOT # TODO: For instance: post a red image and check that the green pixel IS NOT
# there, then post a green image and check that the green pixel IS there. # there, then post a green image and check that the green pixel IS there.

View file

@ -32,29 +32,8 @@
extra-experimental-features = nix-command flakes extra-experimental-features = nix-command flakes
''; '';
# no graphics. see nixos-shell virtualisation.memorySize = 2048;
virtualisation = {
graphics = false;
qemu.consoles = [
"tty0"
"hvc0"
];
qemu.options = [
"-serial null"
"-device virtio-serial"
"-chardev stdio,mux=on,id=char0,signal=off"
"-mon chardev=char0,mode=readline"
"-device virtconsole,chardev=char0,nr=0"
];
};
# we can't forward port 80 or 443, so let's run nginx on a different port
networking.firewall.allowedTCPPorts = [
8443
8080
];
services.nginx.defaultSSLListenPort = 8443;
services.nginx.defaultHTTPListenPort = 8080;
virtualisation.forwardPorts = [ virtualisation.forwardPorts = [
{ {
from = "host"; from = "host";
@ -64,12 +43,12 @@
{ {
from = "host"; from = "host";
host.port = 8080; host.port = 8080;
guest.port = 8080; guest.port = 80;
} }
{ {
from = "host"; from = "host";
host.port = 8443; host.port = 8443;
guest.port = 8443; guest.port = 443;
} }
]; ];
} }

View file

@ -33,15 +33,6 @@
email = "none"; email = "none";
}; };
}; };
virtualisation.memorySize = 2048;
virtualisation.forwardPorts = [
{
from = "host";
host.port = 44443;
guest.port = 443;
}
];
} }
#### run mastodon as development environment #### run mastodon as development environment
@ -58,7 +49,6 @@
BIND = "0.0.0.0"; BIND = "0.0.0.0";
# for letter_opener (still doesn't work though) # for letter_opener (still doesn't work though)
REMOTE_DEV = "true"; REMOTE_DEV = "true";
LOCAL_DOMAIN = "${config.fediversity.internal.mastodon.domain}:8443";
}; };
}; };

View file

@ -1,8 +1,4 @@
{ { lib, modulesPath, ... }:
lib,
modulesPath,
...
}:
let let
inherit (lib) mkVMOverride; inherit (lib) mkVMOverride;
@ -27,13 +23,4 @@ in
enableACME = mkVMOverride false; enableACME = mkVMOverride false;
}; };
}; };
virtualisation.memorySize = 2048;
virtualisation.forwardPorts = [
{
from = "host";
host.port = 8000;
guest.port = 80;
}
];
} }

View file

@ -30,6 +30,26 @@ Structured content is managed through Nix expressions, and copy is written in [C
- Edit any of the files, see [repository layout](#repository-layout) for guidance - Edit any of the files, see [repository layout](#repository-layout) for guidance
# Testing
As a derivation, e.g. for CI:
```bash
nix-build -A tests
```
In the development shell:
```bash
run-tests
```
Running tests in a loop on source code changes:
```bash
test-loop
```
# Repository layout # Repository layout
- [content](./content) - [content](./content)

View file

@ -28,7 +28,6 @@ in
]; ];
body.content = body.content =
let let
prev-content = prev.html.body.content;
to-section = { heading, body, attrs ? { } }: { to-section = { heading, body, attrs ? { } }: {
section = { section = {
heading.content = heading; heading.content = heading;
@ -43,7 +42,7 @@ in
}; };
in in
[ [
(lib.head prev-content) # header (lib.head prev.html.body.content)
{ {
section = { section = {
attrs = { }; attrs = { };

View file

@ -24,7 +24,7 @@ in
{ {
link = { link = {
label = "Contact"; label = "Contact";
url = "mailto:mail@fediversity.eu"; url = "mailto:contact@fediversity.eu";
}; };
} }
]; ];

View file

@ -14,16 +14,11 @@ let
in in
new // { types = prev.recursiveUpdate prev.types new.types; }; new // { types = prev.recursiveUpdate prev.types new.types; };
lib'' = lib.extend lib'; lib'' = lib.extend lib';
# TODO: update when the PR to expose `pkgs.devmode` is merged
# https://github.com/NixOS/nixpkgs/pull/354556
devmode = pkgs.callPackage "${sources.devmode-reusable}/pkgs/by-name/de/devmode/package.nix" {
buildArgs = "${toString ./.} -A build --show-trace";
open = "/index.html";
};
in in
rec { rec {
lib = import ./lib.nix { inherit lib; }; lib = lib'';
result = lib''.evalModules { result = lib.evalModules {
modules = [ modules = [
./structure ./structure
./content ./content
@ -38,11 +33,36 @@ rec {
inherit (result.config) build; inherit (result.config) build;
shell = pkgs.mkShellNoCC { shell =
packages = with pkgs; [ let
cmark run-tests = pkgs.writeShellApplication {
npins name = "run-tests";
devmode text = with pkgs; with lib; ''
]; ${getExe nix-unit} ${toString ./tests.nix} "$@"
}; '';
};
test-loop = pkgs.writeShellApplication {
name = "test-loop";
text = with pkgs; with lib; ''
${getExe watchexec} -w ${toString ./.} -- ${getExe nix-unit} ${toString ./tests.nix}
'';
};
devmode = pkgs.devmode.override {
buildArgs = "${toString ./.} -A build --show-trace";
open = "/index.html";
};
in
pkgs.mkShellNoCC {
packages = [
pkgs.npins
run-tests
test-loop
devmode
];
};
tests = with pkgs; with lib; runCommand "run-tests" { } ''
touch $out
${getExe nix-unit} ${./tests.nix} "$@"
'';
} }

View file

@ -99,20 +99,22 @@ rec {
relativePath = path1': path2': relativePath = path1': path2':
let let
inherit (lib.path) subpath; inherit (lib.path) subpath;
inherit (lib) lists; inherit (lib) lists length take drop min max;
path1 = subpath.components path1'; path1 = subpath.components path1';
prefix1 = with lib; take (length path1 - 1) path1; prefix1 = take (length path1 - 1) path1;
path2 = subpath.components path2'; path2 = subpath.components path2';
prefix2 = with lib; take (length path1 - 1) path2; prefix2 = take (length path2 - 1) path2;
commonPrefixLength = with lists; commonPrefixLength = with lists;
findFirstIndex (i: i.fst != i.snd) findFirstIndex (i: i.fst != i.snd)
(length prefix1) (min (length prefix1) (length prefix2))
(zipLists prefix1 prefix2); (zipLists prefix1 prefix2);
depth = max 0 (length prefix1 - commonPrefixLength);
relativeComponents = with lists; relativeComponents = with lists;
[ "." ] ++ (replicate (length prefix1 - commonPrefixLength) "..") ++ (drop commonPrefixLength path2); [ "." ] ++ (replicate depth "..") ++ (drop commonPrefixLength path2);
in in
join "/" relativeComponents; join "/" relativeComponents;

View file

@ -1,22 +1,22 @@
{ {
"pins": { "pins": {
"devmode-reusable": { "nix-unit": {
"type": "Git", "type": "Git",
"repository": { "repository": {
"type": "GitHub", "type": "GitHub",
"owner": "fricklerhandwerk", "owner": "nix-community",
"repo": "nixpkgs" "repo": "nix-unit"
}, },
"branch": "refactor-devmode", "branch": "main",
"revision": "f0746a6690939987734d6519a2e3daf28ed36d87", "revision": "2071bbb765681ac3d8194ec560c8b27ff2a3b541",
"url": "https://github.com/fricklerhandwerk/nixpkgs/archive/f0746a6690939987734d6519a2e3daf28ed36d87.tar.gz", "url": "https://github.com/nix-community/nix-unit/archive/2071bbb765681ac3d8194ec560c8b27ff2a3b541.tar.gz",
"hash": "011kg3c2mfy7y808llpmq3hf6vv6rlazx8m11w41pnym4kwr3ivz" "hash": "0blz1kcmn9vnr9q3iqp2mv13hv3pdccljmmc54f8j7ybf5v0wgmp"
}, },
"nixpkgs": { "nixpkgs": {
"type": "Channel", "type": "Channel",
"name": "nixpkgs-unstable", "name": "nixpkgs-unstable",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-24.11pre691017.b69de56fac8c/nixexprs.tar.xz", "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre711046.8edf06bea5bc/nixexprs.tar.xz",
"hash": "0z32pj0lh5ng2a6cn0qfmka8cynnygckn5615mkaxq2aplkvgzx3" "hash": "1mwsn0rvfm603svrq3pca4c51zlix5gkyr4gl6pxhhq3q6xs5s8y"
} }
}, },
"version": 3 "version": 3

View file

@ -505,7 +505,6 @@ let
else baseType.merge loc (map (p: p.def // { value = p.processed; }) processed); else baseType.merge loc (map (p: p.def // { value = p.processed; }) processed);
}; };
in in
# HACK: bail out for now
with-section-constraints with-section-constraints
# TODO: find a reasonable cut-off for where to place raw content # TODO: find a reasonable cut-off for where to place raw content
(listOf (either str (attrTag categories.flow))); (listOf (either str (attrTag categories.flow)));

View file

@ -127,8 +127,10 @@ header > nav > ul > li > details > nav ul li {
padding: 0.25em 0; padding: 0.25em 0;
} }
#menu-toggle { #menu-toggle,
#menu-toggle + label {
display: none; display: none;
appearance: none;
} }
@media (max-width: 50em) { @media (max-width: 50em) {
@ -136,14 +138,21 @@ header > nav > ul > li > details > nav ul li {
display: block; display: block;
} }
#menu-toggle::before { #menu-toggle ~ label {
content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 20 20'%3E%3Cpath d='M0 3h20v2H0V3z m0 6h20v2H0V9z m0 6h20v2H0V0z'/%3E%3C/svg%3E"); position: absolute;
right: 0;
padding: 0.5em;
cursor: pointer;
display: block; display: block;
} }
#menu-toggle:checked::before { .menu-close,
content: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 20 20'%3E%3Cpolygon points='11 9 22 9 22 11 11 11 11 22 9 22 9 11 -2 11 -2 9 9 9 9 -2 11 -2' transform='rotate(45 10 10)'/%3E%3C/svg%3E"); .menu-open {
cursor: pointer;
} }
.menu-close { display: none; }
#menu-toggle:checked + label .menu-close { display: block; }
#menu-toggle:checked + label .menu-open { display: none; }
header > nav { header > nav {
margin-bottom: 1em; margin-bottom: 1em;
@ -210,14 +219,4 @@ header > nav > ul > li > details > nav ul li {
header { header {
position: relative; position: relative;
} }
/* for some reason this must be at the end to work */
#menu-toggle {
display: block;
position: absolute;
right: 1em;
top: 0.5em;
appearance: none;
cursor: pointer;
}
} }

View file

@ -63,7 +63,15 @@ in
body.content = [ body.content = [
'' ''
<header> <header>
<input type="checkbox" id="menu-toggle"> <input type="checkbox" id="menu-toggle" hidden>
<label for="menu-toggle" hidden>
<svg class="menu-open" xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 20 20">
<path d="M0 4 H20 M0 10 H20 M0 16 H20" stroke="currentColor" stroke-width="2"/>
</svg>
<svg class="menu-close" xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 20 20">
<path d="M2 2L18 18M18 2L2 18" stroke="currentColor" stroke-width="2"/>
</svg>
</label>
${lib.indent " " (cfg.menus.main.outputs.html page)} ${lib.indent " " (cfg.menus.main.outputs.html page)}
</header> </header>
'' ''

21
website/tests.nix Normal file
View file

@ -0,0 +1,21 @@
# tests written for running with `nix-unit`
# https://github.com/nix-community/nix-unit
let
inherit (import ./. { }) lib;
in
{
test-relativePath = with lib;
let
testData = [
{ from = "bar"; to = "baz"; expected = "./baz"; }
{ from = "foo/bar"; to = "foo/baz"; expected = "./baz"; }
{ from = "foo"; to = "bar/baz"; expected = "./bar/baz"; }
{ from = "foo/bar"; to = "baz"; expected = "./../baz"; }
{ from = "foo/bar/baz"; to = "foo"; expected = "./../../foo"; }
];
in
{
expr = map (case: relativePath case.from case.to) testData;
expected = map (case: case.expected) testData;
};
}