Compare commits

...

3 commits

111 changed files with 1166 additions and 1783 deletions

.envrc

@ -3,8 +3,8 @@
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv --flake .)"
eval "$(lorri direnv)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use flake
use_nix
fi
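With direnv installed, this environment loads automatically once the file has been approved; a minimal sketch (assuming direnv is set up in the shell):

```sh
direnv allow   # approve .envrc after reviewing it
```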


@ -13,13 +13,13 @@ jobs:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.pre-commit -L
- run: nix-build -A tests
check-peertube:
check-services:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.peertube -L
- run: cd services && nix-build -A tests.peertube
check-panel:
runs-on: native
@ -31,4 +31,10 @@ jobs:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-basic -L
- run: cd deployment && nix-build -A tests
check-infra:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: cd infra && nix-build -A tests
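The same checks the CI runs can be reproduced locally with plain `nix-build`; a sketch, assuming a local Nix installation and the repository root as working directory:

```sh
nix-build -A tests                              # root checks, incl. pre-commit
(cd services && nix-build -A tests.peertube)    # PeerTube service test
(cd deployment && nix-build -A tests)           # deployment checks
(cd infra && nix-build -A tests)                # infra checks
```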

.gitignore

@ -1,3 +1,9 @@
.npins.json
.terraform/
.terraform.lock.hcl
.terraform.tfstate.lock.info
terraform.tfstate*
.auto.tfvars.json
.DS_Store
.idea
*.log


@ -1,8 +1,7 @@
# The Fediversity project
This repository contains all the code and code-related files having to do with
[the Fediversity project](https://fediversity.eu/), with the notable exception
of [NixOps4 that is hosted on GitHub](https://github.com/nixops4/nixops4).
[the Fediversity project](https://fediversity.eu/).
## Goals
@ -81,27 +80,15 @@ Not everyone has the expertise and time to run their own server.
The software includes technical configuration that links software components.
Most user-facing configuration remains untouched by the deployment process.
> Example: NixOps4 is used to deploy [Pixelfed](https://pixelfed.org).
> Example: OpenTofu is used to deploy [Pixelfed](https://pixelfed.org).
- Migrate
Move service configurations and user data to a different hosting provider.
- [NixOps4](https://github.com/nixops4/nixops4)
- [OpenTofu](https://opentofu.org/)
A tool for deploying and managing resources through the Nix language.
NixOps4 development is supported by the Fediversity project
- Resource
A [resource for NixOps4](https://nixops.dev/manual/development/concept/resource.html) is any external entity that can be declared with NixOps4 expressions and manipulated with NixOps4, such as a virtual machine, an active NixOS configuration, a DNS entry, or customer database.
- Resource provider
A resource provider for NixOps4 is an executable that communicates between a resource and NixOps4 using a standardised protocol, allowing [CRUD operations](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) on the resources to be performed by NixOps4.
Refer to the [NixOps4 manual](https://nixops.dev/manual/development/resource-provider/index.html) for details.
> Example: We need a resource provider for obtaining deployment secrets from a database.
An infrastructure-as-code tool, and open-source (MPL 2.0) fork of Terraform.
## Development
@ -118,9 +105,6 @@ Contact the project team if you have questions or suggestions, or if you're inte
Most of the directories in this repository have their own README going into more
details as to what they are for. As an overview:
- [`deployment/`](./deployment) contains work to generate a full Fediversity
deployment from a minimal configuration.
- [`infra/`](./infra) contains the configurations for the various VMs that are
in production for the project, for instance the Git instances or the Wiki, as
well as means to provision and set up new ones.
@ -128,14 +112,8 @@ details as to what they are for. As an overview:
- [`keys/`](./keys) contains the public keys of the contributors to this project
as well as the systems that we administrate.
- [`matrix/`](./matrix) contains everything having to do with setting up a
fully-featured Matrix server.
- [`secrets/`](./secrets) contains the secrets that need to get injected into
machine configurations.
- [`services/`](./services) contains our effort to make Fediverse applications
work seamlessly together in our specific setting.
- [`website/`](./website) contains the framework and the content of [the
Fediversity website](https://fediversity.eu/)

default.nix

@ -0,0 +1,57 @@
{
system ? builtins.currentSystem,
sources ? import ./npins,
pkgs ? import sources.nixpkgs { inherit system; },
}:
let
inherit (sources) nixpkgs git-hooks gitignore;
inherit (pkgs) lib;
pre-commit-check =
(import "${git-hooks}/nix" {
inherit nixpkgs system;
gitignore-nix-src = {
lib = import gitignore { inherit lib; };
};
}).run
{
src = ./.;
hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [
"npins"
".terraform"
];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
};
in
{
# shell for testing TF directly
shell = pkgs.mkShellNoCC {
inherit (pre-commit-check) shellHook;
buildInputs = pre-commit-check.enabledPackages;
packages = [
pkgs.nixfmt-rfc-style
];
};
tests = {
inherit pre-commit-check;
};
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}
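A hedged usage sketch for this file; the attribute names come from the expression above, while the exact invocations are assumptions about how it is meant to be consumed:

```sh
nix-shell default.nix -A shell   # enter the shell with the pre-commit hooks installed
nix-build -A tests               # build the pre-commit check, as the CI does
```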


@ -1,6 +0,0 @@
# Deployment
This repository contains work to generate a full Fediversity deployment from a
minimal configuration. This is different from [`../services/`](../services) that
focuses on one machine, providing a polished and unified interface to different
Fediverse services.


@ -1,21 +0,0 @@
{ inputs, ... }:
{
nixops4Deployments.check-deployment-basic =
{ ... }:
{
imports = [
./deployment.nix
];
_module.args.inputs = inputs;
};
perSystem =
{ inputs', pkgs, ... }:
{
checks.deployment-basic = pkgs.callPackage ./nixosTest.nix {
nixops4-flake-in-a-bottle = inputs'.nixops4.packages.flake-in-a-bottle;
inherit inputs;
};
};
}


@ -1,8 +1,7 @@
{
testers,
inputs,
runCommandNoCC,
nixops4-flake-in-a-bottle,
sources ? import ../npins,
...
}:
@ -14,11 +13,8 @@ testers.runNixOSTest (
...
}:
let
vmSystem = config.node.pkgs.hostPlatform.system;
pathToRoot = ../../..;
pathFromRoot = "deployment/check/basic";
deploymentName = "check-deployment-basic";
## TODO: sanity check the existence of (pathToRoot + "/flake.nix")
## TODO: sanity check that (pathToRoot + "/${pathFromRoot}" == ./.)
@ -29,12 +25,6 @@ testers.runNixOSTest (
root = pathToRoot;
};
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
targetNetworkJSON = hostPkgs.writeText "target-network.json" (
builtins.toJSON config.nodes.target.system.build.networkConfig
);
@ -42,17 +32,15 @@ testers.runNixOSTest (
in
{
name = "deployment-basic";
imports = [
inputs.nixops4-nixos.modules.nixosTest.static
];
# imports = [
# ];
nodes = {
deployer =
{ pkgs, nodes, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${vmSystem}.default
];
# environment.systemPackages = [
# ];
virtualisation = {
## Memory use is expected to be dominated by the NixOS evaluation,
@ -70,11 +58,7 @@ testers.runNixOSTest (
system.extraDependencies =
[
"${inputs.flake-parts}"
"${inputs.flake-parts.inputs.nixpkgs-lib}"
"${inputs.nixops4}"
"${inputs.nixops4-nixos}"
"${inputs.nixpkgs}"
sources.nixpkgs
pkgs.stdenv
pkgs.stdenvNoCC
@ -132,19 +116,7 @@ testers.runNixOSTest (
cd work
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input flake-parts ${inputs.flake-parts} \
--override-input nixops4 ${nixops4-flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${nixops4-flake-in-a-bottle} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
\
--override-input nixpkgs ${inputs.nixpkgs} \
--override-input git-hooks ${inputs.git-hooks} \
;
""")
@ -152,7 +124,7 @@ testers.runNixOSTest (
target.fail("cowsay hi 1>&2")
with subtest("Run the deployment"):
deployer.succeed("cd work && nixops4 apply ${deploymentName} --show-trace --no-interactive")
deployer.succeed("cd work && tofu apply --show-trace --no-interactive")
with subtest("Check the deployment"):
target.succeed("cowsay hi 1>&2")


@ -1,194 +1,12 @@
## `makeMakeDeployment` -- Function to help hosting providers make a
## `makeDeployment` function.
##
## https://factoryfactoryfactory.net/
## Generic utilities used in this function, eg. nixpkgs, NixOps4 providers, etc.
## REVIEW: We should maybe be more specific than just `inputs`.
{
lib,
nixops4,
nixops4-nixos,
fediversity,
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs {
inherit system;
},
}:
## Information on the hosting provider's infrastructure. This is where we inform
## this function of where it can find eg. Proxmox.
{
## Four NixOS configuration resource modules for four services. Those are VMs
## that are already deployed and on which we will push our configurations.
##
## - Ultimately, we just want a pool of VMs, or even just a Proxmox.
## - Each machine is flagged for a certain use case until we control DNS.
garageConfigurationResource,
mastodonConfigurationResource,
peertubeConfigurationResource,
pixelfedConfigurationResource,
}:
## From the hosting provider's perspective, the function is meant to be
## partially applied only until here.
## Information on the specific deployment that we request. This is the
## information coming from the FediPanel.
##
## FIXME: lock step the interface with the definitions in the FediPanel
panelConfig:
let
inherit (lib) mkIf;
in
## Regular arguments of a NixOps4 deployment module.
{ providers, ... }:
{
options = {
deployment = lib.mkOption {
description = ''
Configuration to be deployed
'';
# XXX(@fricklerhandwerk):
# misusing this will produce obscure errors that will be truncated by NixOps4
type = lib.types.submodule ./options.nix;
};
};
config = {
providers = { inherit (nixops4.modules.nixops4Provider) local; };
resources =
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
makeConfigurationResource = resourceModule: config: {
type = providers.local.exec;
imports = [
nixops4-nixos.modules.nixops4Resource.nixos
resourceModule
{
## NOTE: With NixOps4, there are several levels and all of them live
## in the NixOS module system:
##
## 1. Each NixOps4 deployment is a module.
## 2. Each NixOps4 resource is a module. This very comment is
## inside an attrset imported as a module in a resource.
## 3. Each NixOps4 'configuration' resource contains an attribute
## 'nixos.module', itself a NixOS configuration module.
nixos.module =
{ ... }:
{
imports = [
config
fediversity
];
};
}
];
};
in
{
garage-configuration = makeConfigurationResource garageConfigurationResource (
{ pkgs, ... }:
mkIf (panelConfig.mastodon.enable || panelConfig.peertube.enable || panelConfig.pixelfed.enable) {
fediversity = {
inherit (panelConfig) domain;
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
}
);
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.mastodon.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
}
);
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.peertube.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
enable = true;
## NOTE: Only ever used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
};
};
}
);
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.pixelfed.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
enable = true;
};
};
}
);
};
tests = {
deployment-basic = import ./check/basic/nixosTest.nix { inherit pkgs; };
};
}


@ -1,3 +0,0 @@
{
imports = [ ./check/basic/flake-part.nix ];
}

flake.lock (generated)

@ -1,913 +0,0 @@
{
"nodes": {
"agenix": {
"inputs": {
"darwin": "darwin",
"home-manager": "home-manager",
"nixpkgs": "nixpkgs",
"systems": "systems"
},
"locked": {
"lastModified": 1736955230,
"narHash": "sha256-uenf8fv2eG5bKM8C/UvFaiJMZ4IpUFaQxk9OH5t/1gA=",
"owner": "ryantm",
"repo": "agenix",
"rev": "e600439ec4c273cf11e06fe4d9d906fb98fa097c",
"type": "github"
},
"original": {
"owner": "ryantm",
"repo": "agenix",
"type": "github"
}
},
"crane": {
"flake": false,
"locked": {
"lastModified": 1727316705,
"narHash": "sha256-/mumx8AQ5xFuCJqxCIOFCHTVlxHkMT21idpbgbm/TIE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "5b03654ce046b5167e7b0bccbd8244cb56c16f0e",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "v0.19.0",
"repo": "crane",
"type": "github"
}
},
"darwin": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1700795494,
"narHash": "sha256-gzGLZSiOhf155FW7262kdHo2YDeugp3VuIFb4/GGng0=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "4b9b83d5a92e8c1fbfd8eb27eda375908c11ec4d",
"type": "github"
},
"original": {
"owner": "lnl7",
"ref": "master",
"repo": "nix-darwin",
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1740485968,
"narHash": "sha256-WK+PZHbfDjLyveXAxpnrfagiFgZWaTJglewBWniTn2Y=",
"owner": "nix-community",
"repo": "disko",
"rev": "19c1140419c4f1cdf88ad4c1cfb6605597628940",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"dream2nix": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"nixpkgs"
],
"purescript-overlay": "purescript-overlay",
"pyproject-nix": "pyproject-nix"
},
"locked": {
"lastModified": 1735160684,
"narHash": "sha256-n5CwhmqKxifuD4Sq4WuRP/h5LO6f23cGnSAuJemnd/4=",
"owner": "nix-community",
"repo": "dream2nix",
"rev": "8ce6284ff58208ed8961681276f82c2f8f978ef4",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "dream2nix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_4": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib_2"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_3": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib_3"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_4": {
"inputs": {
"nixpkgs-lib": [
"nixops4-nixos",
"nixops4",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1737465171,
"narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "9364dc02281ce2d37a1f55b6e51f7c0f65a75f17",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"git-hooks-nix": {
"inputs": {
"flake-compat": "flake-compat_2",
"gitignore": "gitignore_2",
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1737465171,
"narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "9364dc02281ce2d37a1f55b6e51f7c0f65a75f17",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"git-hooks-nix_2": {
"inputs": {
"flake-compat": [
"nixops4-nixos",
"nixops4",
"nix"
],
"gitignore": [
"nixops4-nixos",
"nixops4",
"nix"
],
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix",
"nixpkgs"
],
"nixpkgs-stable": [
"nixops4-nixos",
"nixops4",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1734279981,
"narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"gitignore_2": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
"git-hooks-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1703113217,
"narHash": "sha256-7ulcXOk63TIT2lVDSExj7XzFx09LpdSAPtvgtM7yQPE=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "3bfaacf46133c037bb356193bd2f1765d9dc82c1",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"mk-naked-shell": {
"flake": false,
"locked": {
"lastModified": 1681286841,
"narHash": "sha256-3XlJrwlR0nBiREnuogoa5i1b4+w/XPe0z8bbrJASw0g=",
"owner": "yusdacra",
"repo": "mk-naked-shell",
"rev": "7612f828dd6f22b7fb332cc69440e839d7ffe6bd",
"type": "github"
},
"original": {
"owner": "yusdacra",
"repo": "mk-naked-shell",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat_3",
"flake-parts": "flake-parts_4",
"git-hooks-nix": "git-hooks-nix_2",
"nixfmt": "nixfmt",
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nixpkgs"
],
"nixpkgs-23-11": "nixpkgs-23-11",
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1738382221,
"narHash": "sha256-3+qJBts5RTAtjCExo6bkqrttL+skpYZzPOVEzPSwVtc=",
"owner": "NixOS",
"repo": "nix",
"rev": "d949c8de7c7e84bb7537dc772609686c37033a3b",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nix",
"type": "github"
}
},
"nix-cargo-integration": {
"inputs": {
"crane": "crane",
"dream2nix": "dream2nix",
"mk-naked-shell": "mk-naked-shell",
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nixpkgs"
],
"parts": "parts",
"rust-overlay": "rust-overlay",
"treefmt": "treefmt"
},
"locked": {
"lastModified": 1738390452,
"narHash": "sha256-o8kg4q1V2xV9ZlszAnizXJK2c+fbhAeLbGoTUa2u1Bw=",
"owner": "yusdacra",
"repo": "nix-cargo-integration",
"rev": "718d577808e3e31f445b6564d58b755294e72f42",
"type": "github"
},
"original": {
"owner": "yusdacra",
"repo": "nix-cargo-integration",
"type": "github"
}
},
"nixfmt": {
"inputs": {
"flake-utils": "flake-utils"
},
"locked": {
"lastModified": 1736283758,
"narHash": "sha256-hrKhUp2V2fk/dvzTTHFqvtOg000G1e+jyIam+D4XqhA=",
"owner": "NixOS",
"repo": "nixfmt",
"rev": "8d4bd690c247004d90d8554f0b746b1231fe2436",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixfmt",
"type": "github"
}
},
"nixops4": {
"inputs": {
"flake-parts": "flake-parts_3",
"nix": "nix",
"nix-cargo-integration": "nix-cargo-integration",
"nixpkgs": "nixpkgs_5",
"nixpkgs-old": "nixpkgs-old"
},
"locked": {
"lastModified": 1739444468,
"narHash": "sha256-brg21neEI7pUnSRksOx9IE8/Kcr8OmEg4NIxL0sSy+U=",
"owner": "nixops4",
"repo": "nixops4",
"rev": "fb601ee79d8c9e3e7aca98dd2334ea91da3ea7e0",
"type": "github"
},
"original": {
"owner": "nixops4",
"repo": "nixops4",
"type": "github"
}
},
"nixops4-nixos": {
"inputs": {
"flake-parts": "flake-parts_2",
"git-hooks-nix": "git-hooks-nix",
"nixops4": "nixops4",
"nixops4-nixos": [
"nixops4-nixos"
],
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nixpkgs"
]
},
"locked": {
"lastModified": 1740082114,
"narHash": "sha256-xNUnup8loWAMlP2+EnGX7HaO2vJlFakjvU0gTYi3F48=",
"owner": "nixops4",
"repo": "nixops4-nixos",
"rev": "85ebdd8b4b182287e4c6ca0e888f2d3a4aa7453c",
"type": "github"
},
"original": {
"owner": "nixops4",
"repo": "nixops4-nixos",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1703013332,
"narHash": "sha256-+tFNwMvlXLbJZXiMHqYq77z/RfmpfpiI3yjL6o/Zo9M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "54aac082a4d9bb5bbc5c4e899603abfb76a3f6d6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-23-11": {
"locked": {
"lastModified": 1717159533,
"narHash": "sha256-oamiKNfr2MS6yH64rUn99mIZjc45nGJlj9eGth/3Xuw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a62e6edd6d5e1fa0329b8653c801147986f8d446",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1738452942,
"narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-lib_2": {
"locked": {
"lastModified": 1738452942,
"narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-lib_3": {
"locked": {
"lastModified": 1738452942,
"narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-old": {
"locked": {
"lastModified": 1735563628,
"narHash": "sha256-OnSAY7XDSx7CtDoqNh8jwVwh4xNL/2HaJxGjryLWzX8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b134951a4c9f3c995fd7be05f3243f8ecd65d798",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1738136902,
"narHash": "sha256-pUvLijVGARw4u793APze3j6mU1Zwdtz7hGkGGkD87qw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9a5db3142ce450045840cc8d832b13b8a2018e0c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1730768919,
"narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1730768919,
"narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_5": {
"locked": {
"lastModified": 1738410390,
"narHash": "sha256-xvTo0Aw0+veek7hvEVLzErmJyQkEcRk6PSR4zsRQFEc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "3a228057f5b619feb3186e986dbe76278d707b6e",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_6": {
"locked": {
"lastModified": 1740463929,
"narHash": "sha256-4Xhu/3aUdCKeLfdteEHMegx5ooKQvwPHNkOgNCXQrvc=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "5d7db4668d7a0c6cc5fc8cf6ef33b008b2b1ed8b",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"parts": {
"inputs": {
"nixpkgs-lib": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"nixpkgs"
]
},
"locked": {
"lastModified": 1736143030,
"narHash": "sha256-+hu54pAoLDEZT9pjHlqL9DNzWz0NbUn8NEAHP7PQPzU=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "b905f6fc23a9051a6e1b741e1438dbfc0634c6de",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"purescript-overlay": {
"inputs": {
"flake-compat": "flake-compat_4",
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"dream2nix",
"nixpkgs"
],
"slimlock": "slimlock"
},
"locked": {
"lastModified": 1728546539,
"narHash": "sha256-Sws7w0tlnjD+Bjck1nv29NjC5DbL6nH5auL9Ex9Iz2A=",
"owner": "thomashoneyman",
"repo": "purescript-overlay",
"rev": "4ad4c15d07bd899d7346b331f377606631eb0ee4",
"type": "github"
},
"original": {
"owner": "thomashoneyman",
"repo": "purescript-overlay",
"type": "github"
}
},
"pyproject-nix": {
"flake": false,
"locked": {
"lastModified": 1702448246,
"narHash": "sha256-hFg5s/hoJFv7tDpiGvEvXP0UfFvFEDgTdyHIjDVHu1I=",
"owner": "davhau",
"repo": "pyproject.nix",
"rev": "5a06a2697b228c04dd2f35659b4b659ca74f7aeb",
"type": "github"
},
"original": {
"owner": "davhau",
"ref": "dream2nix",
"repo": "pyproject.nix",
"type": "github"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"disko": "disko",
"flake-parts": "flake-parts",
"git-hooks": "git-hooks",
"nixops4": [
"nixops4-nixos",
"nixops4"
],
"nixops4-nixos": "nixops4-nixos",
"nixpkgs": "nixpkgs_6"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"nixpkgs"
]
},
"locked": {
"lastModified": 1738376888,
"narHash": "sha256-S6ErHxkSm0iA7ZMsjjDaASWxbELYcdfv8BhOkkj1rHw=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "83284068670d5ae4a43641c4afb150f3446be70d",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"slimlock": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"dream2nix",
"purescript-overlay",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688756706,
"narHash": "sha256-xzkkMv3neJJJ89zo3o2ojp7nFeaZc2G0fYwNXNJRFlo=",
"owner": "thomashoneyman",
"repo": "slimlock",
"rev": "cf72723f59e2340d24881fd7bf61cb113b4c407c",
"type": "github"
},
"original": {
"owner": "thomashoneyman",
"repo": "slimlock",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"treefmt": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
"nixops4",
"nix-cargo-integration",
"nixpkgs"
]
},
"locked": {
"lastModified": 1738070913,
"narHash": "sha256-j6jC12vCFsTGDmY2u1H12lMr62fnclNjuCtAdF1a4Nk=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "bebf27d00f7d10ba75332a0541ac43676985dea3",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
}
},
"root": "root",
"version": 7
}


@ -1,74 +0,0 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
agenix.url = "github:ryantm/agenix";
disko.url = "github:nix-community/disko";
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [
inputs.git-hooks.flakeModule
inputs.nixops4.modules.flake.default
./deployment/flake-part.nix
./infra/flake-part.nix
./services/flake-part.nix
];
perSystem =
{
config,
pkgs,
lib,
inputs',
...
}:
{
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
devShells.default = pkgs.mkShell {
packages = [
pkgs.nil
inputs'.agenix.packages.default
pkgs.openssh
pkgs.httpie
pkgs.jq
# exposing this env var as a hack to pass info in from form
(inputs'.nixops4.packages.default.overrideAttrs {
impureEnvVars = [ "DEPLOYMENT" ];
})
];
shellHook = config.pre-commit.installationScript;
};
};
};
}

infra/.envrc

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# the shebang is ignored, but nice for editors
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use_nix
fi


@ -1,98 +1,30 @@
# Infra
# service deployment
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.
deploys [NixOS](https://nixos.org/) templates using [OpenTofu](https://opentofu.org/).
## Provisioning VMs with an initial configuration
## requirements
NOTE[Niols]: This is very manual and clunky. Two things will happen. In the near
future, I will improve the provisioning script to make this a bit less clunky.
In the far future, NixOps4 will be able to communicate with Proxmox directly and
everything will become much cleaner.
- [nix](https://nix.dev/)
1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
above 100. For instance, `fedi117`.
## usage
2. Add a basic configuration for the machine. These typically go in
`infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
find inspiration. You probably do not need a `nixos.module` option at this
point.
### development
3. Add a file for each of those VMs' public keys, eg.
```
touch keys/systems/fedi117.pub
```
Those files need to exist during provisioning, but their content matters only
when updating the machines' configuration.
FIXME: Remove this step by making the provisioning script not fail when the
public key does not exist yet.
4. Run the provisioning script:
```
sh infra/proxmox-provision.sh fedi117
```
The script can take several ids at the same time. It requires some
authentication options and provides several more. See `--help`.
5. (Optional) Add a DNS entry for the machine; for instance `fedi117.abundos.eu
A 95.215.187.117`.
6. Grab the public host keys for the machines in question, and add them to the
repository. For instance:
```
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
```
FIXME: Make the provisioning script do that for us.
7. Regenerate the list of machines:
```
sh infra/machines.md.sh
```
Commit it with the machine's configuration, public key, etc.
8. At this point, the machine contains a very basic configuration that contains
just enough for it to boot and be reachable. Go on to the next section to
update the machine and deploy an actual configuration.
FIXME: Figure out why the full configuration isn't on the machine at this
point and fix it.
## Updating existing VM configurations
Their configuration can be updated via NixOps4. Run
before using other commands, if not using direnv:
```sh
nixops4 deployments list
nix-shell
```
to see the available deployments.
This should be done from the root of the repository,
otherwise NixOps4 will fail with something like:
```
nixops4 error: evaluation: error:
… while calling the 'getFlake' builtin
error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist, evaluation: error:
… while calling the 'getFlake' builtin
error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist
```
Then, given a deployment (eg. `fedi200`), run
then to initialize, or after updating pins or TF providers:
```sh
nixops4 apply <deployment>
setup
```
Alternatively, to run the `default` deployment, which contains all the VMs, run
then, one can use the `tofu` CLI in the sub-folders.
```sh
nixops4 apply
```
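for example, from within the shell (a sketch; the sub-folder and commands are illustrative):

```sh
cd dev
tofu plan    # preview the changes
tofu apply   # deploy them
```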
## implementing
## Removing an existing VM
See `infra/proxmox-remove.sh --help`.
proper documentation TODO.
until then, a reference implementation may be found in [`panel/`](https://git.fediversity.eu/Fediversity/Fediversity/src/branch/main/panel).


@ -2,7 +2,6 @@
let
inherit (lib) mkDefault;
in
{
imports = [


@ -93,7 +93,7 @@ in
description = ''
The IP address of the machine, version 4. It will be injected as a
value in `networking.interfaces.eth0`, but it will also be used to
communicate with the machine via NixOps4.
communicate with the machine.
'';
};
@ -118,7 +118,7 @@ in
description = ''
The IP address of the machine, version 6. It will be injected as a
value in `networking.interfaces.eth0`, but it will also be used to
communicate with the machine via NixOps4.
communicate with the machine.
'';
};
@ -141,7 +141,7 @@ in
hostPublicKey = mkOption {
description = ''
The ed25519 host public key of the machine. It is used to filter Age
secrets and only keep the relevant ones, and to feed to NixOps4.
secrets and only keep the relevant ones, and to feed to TF.
'';
};


@ -1,5 +1,4 @@
{
inputs,
lib,
config,
...
@ -16,48 +15,27 @@ let
in
{
imports = [ ./options.nix ];
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
ssh = {
host = config.fediversityVm.ipv4.address;
hostPublicKey = config.fediversityVm.hostPublicKey;
};
nixpkgs = inputs.nixpkgs;
## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else
## should go into the `./nixos` subdirectory.
nixos.module = {
imports = [
inputs.agenix.nixosModules.default
inputs.disko.nixosModules.default
./options.nix
./nixos
];
imports = [
./options.nix
./nixos
];
## Inject the shared options from the resource's `config` into the NixOS
## configuration.
fediversityVm = config.fediversityVm;
## Read all the secrets, filter the ones that are supposed to be readable
## with this host's public key, and add them correctly to the configuration
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
}
) secrets;
## Read all the secrets, filter the ones that are supposed to be readable
## with this host's public key, and add them correctly to the configuration
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
})
) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo.
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
# allow our panel vm access to the test machines
keys.panel
];
};
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo.
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors;
}

infra/common/shared.nix

@ -0,0 +1,26 @@
{
pkgs,
config,
...
}:
let
inherit (config.terraform) hostname domain initialUser;
in
{
imports = [
<disko/module.nix>
<agenix/modules/age.nix>
../../services/fediversity
./resource.nix
];
fediversityVm.name = hostname;
fediversity = {
inherit domain;
temp.initialUser = {
inherit (initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" initialUser.password;
};
};
}

infra/default.nix

@ -0,0 +1,29 @@
{
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs { inherit system; },
}:
let
inherit (pkgs) lib;
setup = import ./setup.nix { inherit lib pkgs sources; };
in
{
# shell for testing TF directly
shell = pkgs.mkShellNoCC {
packages = [
(import ./tf.nix { inherit lib pkgs; })
pkgs.jaq
setup
];
};
tests = pkgs.callPackage ./tests.nix { };
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}

infra/dev/main.tf

@ -0,0 +1,44 @@
locals {
vm_domain = "abundos.eu"
}
module "nixos" {
source = "../sync-nix"
vm_domain = local.vm_domain
hostname = each.value.hostname
config_nix = each.value.config_nix
config_tf = each.value.config_tf
for_each = { for name, inst in {
# wiki = "vm02187" # does not resolve
# forgejo = "vm02116" # does not resolve
# TODO: move these to a separate `host` dir
dns = "fedi200"
fedipanel = "fedi201"
} : name => {
hostname = inst
config_tf = {
terraform = {
domain = local.vm_domain
hostname = inst
}
}
config_nix = <<-EOF
{
# note: interpolations here are TF ones
imports = [
# shared NixOS config
${path.root}/../common/shared.nix
# FIXME: separate template options by service
${path.root}/options.nix
# for service `forgejo` import `forgejo.nix`
${path.root}/../../machines/dev/${inst}/${name}.nix
# FIXME: get VM details from TF
${path.root}/../../machines/dev/${inst}
];
}
EOF
}
}
}

infra/dev/options.nix

@ -0,0 +1,28 @@
# TODO: could (part of) this be generated somehow? c.f #275
{
lib,
...
}:
let
inherit (lib) types mkOption;
inherit (types) str enum;
in
{
options.terraform = {
domain = mkOption {
type = enum [
"fediversity.net"
];
description = ''
Apex domain under which the services will be deployed.
'';
default = "fediversity.net";
};
hostname = mkOption {
type = str;
description = ''
Internal name of the host, e.g. test01
'';
};
};
}

infra/dev/variables.tf

@ -0,0 +1 @@


@ -1,180 +0,0 @@
{
self,
inputs,
lib,
...
}:
let
inherit (builtins) readDir readFile fromJSON;
inherit (lib)
attrNames
mkOption
evalModules
filterAttrs
;
inherit (lib.attrsets) genAttrs;
## Given a machine's name and whether it is a test VM, make a resource module,
## except for its missing provider. (Depending on the use of that resource, we
## will provide a different one.)
makeResourceModule =
{ vmName, isTestVm }:
{
_module.args = { inherit inputs; };
imports =
[
./common/resource.nix
]
++ (
if isTestVm then
[
./test-machines/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
# allow our panel vm access to the test machines
(import ../keys).panel
];
}
]
else
[
./machines/${vmName}
]
);
fediversityVm.name = vmName;
};
## Given a list of machine names, make a deployment with those machines'
## configurations as resources.
makeDeployment =
vmNames:
{ providers, ... }:
{
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];
## Given an attrset of test configurations (key = test machine name, value =
## NixOS configuration module), make a deployment with those machines'
## configurations as resources.
makeTestDeployment =
(import ../deployment)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
inherit (self.nixosModules) fediversity;
}
{
garageConfigurationResource = makeResourceModule {
vmName = "test01";
isTestVm = true;
};
mastodonConfigurationResource = makeResourceModule {
vmName = "test06"; # somehow `test02` has a problem - use test06 instead
isTestVm = true;
};
peertubeConfigurationResource = makeResourceModule {
vmName = "test05";
isTestVm = true;
};
pixelfedConfigurationResource = makeResourceModule {
vmName = "test04";
isTestVm = true;
};
};
nixops4ResourceNixosMockOptions = {
## NOTE: We allow the use of a few options from
## `inputs.nixops4-nixos.modules.nixops4Resource.nixos` such that we can
## reuse modules that make use of them.
##
## REVIEW: We can probably do much better and cleaner. On the other hand,
## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4.
options = {
nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { };
ssh = mkOption { };
};
};
makeResourceConfig =
vm:
(evalModules {
modules = [
nixops4ResourceNixosMockOptions
(makeResourceModule vm)
];
}).config;
## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration =
isTestVm: vmName:
inputs.nixpkgs.lib.nixosSystem {
modules = [
(makeResourceConfig { inherit vmName isTestVm; }).nixos.module
];
};
makeVmOptions = isTestVm: vmName: {
inherit ((makeResourceConfig { inherit vmName isTestVm; }).fediversityVm)
proxmox
vmId
description
sockets
cores
memory
diskSize
hostPublicKey
unsafeHostPrivateKey
;
};
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ./machines;
testMachines = listSubdirectories ./test-machines;
in
{
flake.lib.makeInstallerIso = import ./makeInstallerIso.nix;
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment.
## - We add a “default” deployment with all normal machines.
## - We add a “test” deployment with all test machines.
nixops4Deployments = genAttrs machines makeDeployment' // {
default = makeDeployment machines;
test = makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env != "" then
env
else
builtins.trace "env var DEPLOYMENT not set, falling back to ./test-machines/configuration.json!" (readFile ./test-machines/configuration.json)
)
);
};
flake.nixosConfigurations =
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true);
flake.vmOptions =
genAttrs machines (makeVmOptions false)
// genAttrs testMachines (makeVmOptions true);
}

View file

@ -1,15 +0,0 @@
<!-- This file is auto-generated by `machines.md.sh` from the machines'
configuration. -->
# Machines
Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description
--------|---------|-------------
[`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./vm02187) | procolix | Wiki
This table excludes all machines with names starting with `test`.


@ -1,43 +0,0 @@
#!/usr/bin/env sh
set -euC
cd "$(dirname "$0")"
{
cat <<\EOF
<!-- This file is auto-generated by `machines.md.sh` from the machines'
configuration. -->
# Machines
Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description
--------|---------|-------------
EOF
vmOptions=$(
cd ..
nix eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
" \
--log-format raw --quiet
)
## NOTE: `jq`'s `keys` is alphabetically sorted, just what we want here.
for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
if [ "${machine#test}" = "$machine" ]; then
proxmox=$(echo "$vmOptions" | jq -r ".$machine.proxmox")
description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)
# shellcheck disable=SC2016
printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
fi
done
cat <<\EOF
This table excludes all machines with names starting with `test`.
EOF
} >| machines.md


@ -1,38 +0,0 @@
{
fediversityVm = {
vmId = 2116;
proxmox = "procolix";
description = "Forgejo";
ipv4.address = "185.206.232.34";
ipv6.address = "2a00:51c0:12:1201::20";
};
nixos.module =
{ lib, ... }:
{
imports = [
./forgejo.nix
];
## vm02116 is running on old hardware based on a Xen VM environment, so it
## needs these extra options. Once the VM gets moved to a newer node, these
## two options can safely be removed.
boot.initrd.availableKernelModules = [ "xen_blkfront" ];
services.xe-guest-utilities.enable = true;
## NOTE: This VM was created manually, which requires us to override the
## default disko-based `fileSystems` definition.
fileSystems = lib.mkForce {
"/" = {
device = "/dev/disk/by-uuid/3802a66d-e31a-4650-86f3-b51b11918853";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-uuid/2CE2-1173";
fsType = "vfat";
};
};
};
}


@ -1,36 +0,0 @@
{
fediversityVm = {
vmId = 2187;
proxmox = "procolix";
description = "Wiki";
ipv4.address = "185.206.232.187";
ipv6.address = "2a00:51c0:12:1201::187";
};
nixos.module =
{ lib, ... }:
{
imports = [
./wiki.nix
];
## NOTE: This VM was created manually, which requires us to override the
## default disko-based `fileSystems` definition.
fileSystems = lib.mkForce {
"/" = {
device = "/dev/disk/by-uuid/a46a9c46-e32b-4216-a4aa-8819b2cd0d49";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-uuid/6AB5-4FA8";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
};
};
}

infra/operator/main.tf

@ -0,0 +1,74 @@
locals {
vm_domain = "abundos.eu"
# user-facing applications
application_configs = {
# FIXME: wrap applications at the interface to grab them in one go?
mastodon = {
cfg = var.mastodon
hostname = "test06"
}
pixelfed = {
cfg = var.pixelfed
hostname = "test04"
}
peertube = {
cfg = var.peertube
hostname = "test05"
}
}
# services shared between applications
peripherals = { for name, inst in {
garage = "test01"
} : name => {
hostname = inst
cfg = {
# enable if any user applications are enabled
enable = anytrue([for _, app in local.application_configs: try(app.cfg.enable, false)])
}
}
}
}
module "nixos" {
source = "../sync-nix"
vm_domain = local.vm_domain
hostname = each.value.hostname
config_nix = each.value.config_nix
config_tf = each.value.config_tf
for_each = {for name, inst in merge(
local.peripherals,
local.application_configs,
) : name => merge(inst, {
config_tf = {
terraform = {
domain = var.domain
hostname = inst.hostname
initialUser = var.initialUser
}
}
config_nix = <<-EOF
{
# note: interpolations here are TF ones
imports = [
# shared NixOS config
${path.root}/../common/shared.nix
# FIXME: separate template options by service
${path.root}/options.nix
# for service `mastodon` import `mastodon.nix`
${path.root}/../../machines/operator/${inst.hostname}/${name}.nix
# FIXME: get VM details from TF
${path.root}/../../machines/operator/${inst.hostname}
];
## FIXME: switch root authentication to users with password-less sudo, see #24
users.users.root.openssh.authorizedKeys.keys = let
keys = import ../../keys;
in [
# allow our panel vm access to the test machines
keys.panel
];
}
EOF
}) if try(inst.cfg.enable, false)}
}


@ -0,0 +1,54 @@
# TODO: could (part of) this be generated somehow? c.f #275
{
lib,
...
}:
let
inherit (lib) types mkOption;
inherit (types) str enum submodule;
in
{
options.terraform = {
domain = mkOption {
type = enum [
"fediversity.net"
];
description = ''
Apex domain under which the services will be deployed.
'';
default = "fediversity.net";
};
hostname = mkOption {
type = str;
description = ''
Internal name of the host, e.g. test01
'';
};
initialUser = mkOption {
description = ''
Some services require an initial user to access them.
This option sets the credentials for such an initial user.
'';
type = submodule {
options = {
displayName = mkOption {
type = str;
description = "Display name of the user";
};
username = mkOption {
type = str;
description = "Username for login";
};
email = mkOption {
type = str;
description = "User's email address";
};
password = mkOption {
type = str;
description = "Password for login";
};
};
};
};
};
}


@ -0,0 +1,51 @@
# TODO: (partially) generate, say from nix modules, c.f. #275
variable "domain" {
type = string
default = "fediversity.net"
}
variable "mastodon" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "pixelfed" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "peertube" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "initialUser" {
type = object({
displayName = string
username = string
email = string
# TODO: mark (nested) credentials as sensitive
# https://discuss.hashicorp.com/t/is-it-possible-to-mark-an-attribute-of-an-object-as-sensitive/24649/2
password = string
})
# FIXME: remove default when the form provides this value, see #285
default = {
displayName = "Testy McTestface"
username = "test"
email = "test@test.com"
password = "testtest"
}
}
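A hypothetical invocation enabling a single application; the flag values are illustrative only:

```sh
tofu -chdir=infra/operator apply \
  -var 'mastodon={enable=true}' \
  -var 'domain=fediversity.net'
```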

infra/pass-ssh-key.sh

@ -0,0 +1,15 @@
#!/usr/bin/env bash
export host="$host"
mkdir -p etc/ssh
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
for keyname in ssh_host_ed25519_key ssh_host_ed25519_key.pub; do
if [[ $keyname == *.pub ]]; then
umask 0133
else
umask 0177
fi
cp "$SCRIPT_DIR/../infra/test-machines/${host}/${keyname}" ./etc/ssh/${keyname}
done
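A hypothetical invocation; the hostname is illustrative, and `host` must name a directory under `infra/test-machines/`:

```sh
host=test01 ./infra/pass-ssh-key.sh   # copies that machine's host keys into ./etc/ssh
```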

infra/setup.nix

@ -0,0 +1,20 @@
{
pkgs,
lib,
sources,
...
}:
pkgs.writeScriptBin "setup" ''
# calculated pins
echo '${lib.strings.toJSON sources}' > sync-nix/.npins.json
# generate TF lock for nix's TF providers
for category in dev operator sync-nix; do
pushd "$category"
rm -rf .terraform/
rm -f .terraform.lock.hcl
# suppress warning on architecture-specific generated lock file:
# `Warning: Incomplete lock file information for providers`.
tofu init -input=false 1>/dev/null
popd
done
''
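A sketch of how this script is meant to be invoked, assuming the shell from `infra/shell.nix` (which puts `setup` on the `PATH`) and `infra/` as the working directory:

```sh
cd infra
nix-shell --run setup   # writes sync-nix/.npins.json and the TF lock files
```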

infra/shell.nix

@ -0,0 +1 @@
(import ./. { }).shell

infra/sync-nix/main.tf

@ -0,0 +1,102 @@
locals {
system = "x86_64-linux"
# dependency paths pre-calculated from npins
pins = jsondecode(file("${path.module}/.npins.json"))
# nix path: expose pins, use nixpkgs in flake commands (`nix run`)
nix_path = "${join(":", [for name, dir in local.pins : "${name}=${dir}"])}:flake=${local.pins["nixpkgs"]}:flake"
}
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
}
# TF resource to build and deploy NixOS instances.
resource "terraform_data" "nixos" {
# trigger rebuild/deploy if (FIXME?) any potentially used config/code changed,
# preventing these (20+s, build being bottleneck) when nothing changed.
# terraform-nixos separates these to only deploy if instantiate changed,
# yet building even then - which may not be as bad when deploying on the remote.
# having build/deploy as one resource reflects wanting to prevent no-op rebuilds
# over preventing (with fewer false positives) no-op deployments,
# as I could not find a way to prevent no-op rebuilds without merging them:
# - generic resources cannot have outputs, while we want info from the instantiation (unless built on host?).
# - `data` always runs, which is slow for deploy and especially build.
triggers_replace = [
data.external.hash.result,
var.hostname,
var.config_nix,
var.config_tf,
]
provisioner "local-exec" {
# Directory to run the script from. We use the TF project root dir,
# expressed as a path relative to where TF is run from,
# matching calling modules' expectations about config_nix locations.
# Note that absolute paths can cause false positives in triggers,
# so they are generally discouraged in TF.
working_dir = path.root
environment = {
# nix path used on build, lets us refer to e.g. nixpkgs like `<nixpkgs>`
NIX_PATH = local.nix_path
}
# TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
# TODO: build on target?
command = <<-EOF
set -euo pipefail
# INSTANTIATE
command=(
nix-instantiate
--expr
'let
os = import <nixpkgs/nixos> {
system = "${local.system}";
configuration =
${var.config_nix} //
# template parameters passed in from TF through JSON
builtins.fromJSON "${replace(jsonencode(var.config_tf), "\"", "\\\"")}" //
{
# nix path for debugging
nix.nixPath = [ "${local.nix_path}" ];
};
};
in
# info we want to get back out
{
substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
drv_path = os.config.system.build.toplevel.drvPath;
out_path = os.config.system.build.toplevel;
}'
)
# instantiate the config in /nix/store
"$${command[@]}" -A out_path
# get the other info
json="$("$${command[@]}" --eval --strict --json)"
# DEPLOY
declare substituters trusted_public_keys drv_path
# set our variables using the json object
eval "export $(echo $json | jaq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
host="root@${var.hostname}.${var.vm_domain}" # FIXME: #24
buildArgs=(
--option extra-binary-caches https://cache.nixos.org/
--option substituters $substituters
--option trusted-public-keys $trusted_public_keys
)
sshOpts=(
-o BatchMode=yes
-o StrictHostKeyChecking=no
)
# get the realized derivation to deploy
outPath=$(nix-store --realize "$drv_path" "$${buildArgs[@]}")
# deploy the config by nix-copy-closure
NIX_SSHOPTS="$${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
# switch the remote host to the config
ssh "$${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; $outPath/bin/switch-to-configuration switch"
EOF
}
}
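Stripped of the TF plumbing, the provisioner above boils down to: instantiate the NixOS system locally, copy its closure to the target, and switch the target to it. A hand-run sketch of the same pipeline for debugging; the configuration file and hostname are placeholders:

```bash
# Manual equivalent of the provisioner's heredoc, for debugging.
# ./configuration.nix and the hostname are illustrative placeholders.
drv_path=$(nix-instantiate '<nixpkgs/nixos>' -A system -I nixos-config=./configuration.nix)
out_path=$(nix-store --realize "$drv_path")
host=root@example.fediversity.net
NIX_SSHOPTS="-o BatchMode=yes" nix-copy-closure --to "$host" "$out_path" --gzip --use-substitutes
ssh "$host" "nix-env --profile /nix/var/nix/profiles/system --set $out_path; $out_path/bin/switch-to-configuration switch"
```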

View file

@ -0,0 +1,17 @@
variable "vm_domain" {
type = string
}
variable "hostname" {
type = string
}
variable "config_nix" {
type = string
default = "{}"
}
variable "config_tf" {
type = map(any)
default = {}
}

37
infra/tests.nix Normal file
View file

@ -0,0 +1,37 @@
{ lib, pkgs }:
let
defaults = {
virtualisation = {
memorySize = 2048;
cores = 2;
};
};
tf = pkgs.callPackage ./tf.nix {
inherit lib pkgs;
};
tfEnv = pkgs.callPackage ./tf-env.nix { };
nodes = {
server = {
environment.systemPackages = [
tf
tfEnv
];
};
};
in
lib.mapAttrs (name: test: pkgs.testers.runNixOSTest (test // { inherit name; })) {
tf-validate-dev = {
inherit defaults nodes;
testScript = ''
server.wait_for_unit("multi-user.target")
server.succeed("${lib.getExe tf} -chdir='${tfEnv}/infra/dev' validate")
'';
};
tf-validate-operator = {
inherit defaults nodes;
testScript = ''
server.wait_for_unit("multi-user.target")
server.succeed("${lib.getExe tf} -chdir='${tfEnv}/infra/operator' validate")
'';
};
}
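These NixOS VM tests only check that the generated Terraform configurations validate inside a throwaway VM. A hedged way to run one of them locally, assuming `infra/default.nix` exposes this file as a `tests` attribute:

```bash
# Run the operator validation test on its own.
# Assumes infra/default.nix wires tests.nix into a `tests` attribute set.
nix-build infra -A tests.tf-validate-operator
```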

32
infra/tf-env.nix Normal file
View file

@ -0,0 +1,32 @@
{
lib,
pkgs,
sources ? import ../npins,
...
}:
pkgs.stdenv.mkDerivation {
name = "tf-repo";
src =
with lib.fileset;
toSource {
root = ../.;
# don't copy ignored files
fileset = intersection (gitTracked ../.) ../.;
};
buildInputs = [
(import ./tf.nix { inherit lib pkgs; })
(import ./setup.nix { inherit lib pkgs sources; })
];
buildPhase = ''
runHook preBuild
pushd infra
setup
popd
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -r . $out
runHook postInstall
'';
}

24
infra/tf.nix Normal file
View file

@ -0,0 +1,24 @@
# FIXME: use overlays so this gets imported just once?
{
lib,
pkgs,
...
}:
let
tofuProvider =
provider:
provider.override (oldArgs: {
provider-source-address =
lib.replaceStrings [ "https://registry.terraform.io/providers" ] [ "registry.opentofu.org" ]
oldArgs.homepage;
});
tf = pkgs.opentofu;
tfPlugins = (
p: [
p.external
]
);
in
# tf.withPlugins tfPlugins
# https://github.com/NixOS/nixpkgs/pull/358522
tf.withPlugins (p: pkgs.lib.lists.map tofuProvider (tfPlugins p))

View file

@ -14,7 +14,7 @@ overwrite a secret without knowing its contents.)
In infra management, the systems' keys are used for security reasons; they
identify the machine that we are talking to. The contributor keys are used to
give access to the `root` user on these machines, which allows, among other
things, to deploy their configurations with NixOps4.
things, to deploy their configurations.
## Adding a contributor

4
machines/README.md Normal file
View file

@ -0,0 +1,4 @@
# Machines
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.

View file

@ -0,0 +1,2 @@
_: {
}

View file

@ -14,10 +14,4 @@
gateway = "2a00:51c0:13:1305::1";
};
};
nixos.module = {
imports = [
./fedipanel.nix
];
};
}

View file

@ -7,12 +7,12 @@ let
in
{
imports = [
<home-manager/nixos>
(import ../../../panel { }).module
];
security.acme = {
acceptTerms = true;
defaults.email = "beheer@procolix.com";
};
age.secrets.panel-ssh-key = {
@ -37,6 +37,24 @@ in
enable = true;
production = true;
domain = "demo.fediversity.eu";
# FIXME: make it work without this duplication
settings =
let
cfg = config.services.${name};
in
{
STATIC_ROOT = "/var/lib/${name}/static";
DEBUG = false;
ALLOWED_HOSTS = [
cfg.domain
cfg.host
"localhost"
"[::1]"
];
CSRF_TRUSTED_ORIGINS = [ "https://${cfg.domain}" ];
COMPRESS_OFFLINE = true;
LIBSASS_OUTPUT_STYLE = "compressed";
};
secrets = {
SECRET_KEY = config.age.secrets.panel-secret-key.path;
};

View file

@ -0,0 +1,31 @@
{ lib, ... }:
{
fediversityVm = {
vmId = 2116;
proxmox = "procolix";
description = "Forgejo";
ipv4.address = "185.206.232.34";
ipv6.address = "2a00:51c0:12:1201::20";
};
## vm02116 is running on old hardware based on a Xen VM environment, so it
## needs these extra options. Once the VM gets moved to a newer node, these
## two options can safely be removed.
boot.initrd.availableKernelModules = [ "xen_blkfront" ];
services.xe-guest-utilities.enable = true;
## NOTE: This VM was created manually, which requires us to override the
## default disko-based `fileSystems` definition.
fileSystems = lib.mkForce {
"/" = {
device = "/dev/disk/by-uuid/3802a66d-e31a-4650-86f3-b51b11918853";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-uuid/2CE2-1173";
fsType = "vfat";
};
};
}

View file

@ -0,0 +1,29 @@
{ lib, ... }:
{
fediversityVm = {
vmId = 2187;
proxmox = "procolix";
description = "Wiki";
ipv4.address = "185.206.232.187";
ipv6.address = "2a00:51c0:12:1201::187";
};
## NOTE: This VM was created manually, which requires us to override the
## default disko-based `fileSystems` definition.
fileSystems = lib.mkForce {
"/" = {
device = "/dev/disk/by-uuid/a46a9c46-e32b-4216-a4aa-8819b2cd0d49";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-uuid/6AB5-4FA8";
fsType = "vfat";
options = [
"fmask=0022"
"dmask=0022"
];
};
};
}

View file

@ -0,0 +1,34 @@
{ pkgs, ... }:
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
in
{
fediversity = {
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
}

View file

@ -0,0 +1,16 @@
{ pkgs, ... }:
let
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
in
{
fediversity = {
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
enable = true;
};
};
}

View file

@ -0,0 +1,20 @@
{ pkgs, ... }:
let
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
in
{
fediversity = {
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
enable = true;
## NOTE: Only ever used for testing anyway.
##
## FIXME: Generate and store in state.
secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
};
};
}

View file

@ -0,0 +1,17 @@
{ pkgs, ... }:
let
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
in
{
fediversity = {
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from TF eventually
};
}

View file

@ -1,5 +1,18 @@
{
"pins": {
"agenix": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "ryantm",
"repo": "agenix"
},
"branch": "main",
"submodules": false,
"revision": "e600439ec4c273cf11e06fe4d9d906fb98fa097c",
"url": "https://github.com/ryantm/agenix/archive/e600439ec4c273cf11e06fe4d9d906fb98fa097c.tar.gz",
"hash": "006ngydiykjgqs85cl19h9klq8kaqm5zs0ng51dnwy7nzgqxzsdr"
},
"clan-core": {
"type": "Git",
"repository": {
@ -12,6 +25,61 @@
"url": null,
"hash": "1w2gsy6qwxa5abkv8clb435237iifndcxq0s79wihqw11a5yb938"
},
"disko": {
"type": "GitRelease",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "disko"
},
"pre_releases": false,
"version_upper_bound": null,
"release_prefix": null,
"submodules": false,
"version": "v1.11.0",
"revision": "cdf8deded8813edfa6e65544f69fdd3a59fa2bb4",
"url": "https://api.github.com/repos/nix-community/disko/tarball/v1.11.0",
"hash": "13brimg7z7k9y36n4jc1pssqyw94nd8qvgfjv53z66lv4xkhin92"
},
"git-hooks": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "cachix",
"repo": "git-hooks.nix"
},
"branch": "master",
"submodules": false,
"revision": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
"url": "https://github.com/cachix/git-hooks.nix/archive/dcf5072734cb576d2b0c59b2ac44f5050b5eac82.tar.gz",
"hash": "1jmdxmx29xghjiaks6f5amnxld8w3kmxb2zv8lk2yzpgp6kr60qg"
},
"gitignore": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "hercules-ci",
"repo": "gitignore.nix"
},
"branch": "master",
"submodules": false,
"revision": "637db329424fd7e46cf4185293b9cc8c88c95394",
"url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz",
"hash": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w"
},
"home-manager": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "home-manager"
},
"branch": "master",
"submodules": false,
"revision": "22b326b42bf42973d5e4fe1044591fb459e6aeac",
"url": "https://github.com/nix-community/home-manager/archive/22b326b42bf42973d5e4fe1044591fb459e6aeac.tar.gz",
"hash": "0hwllnym5mrrxinjsq0p9zn39i110c1xixp4x64svl7jjm5zb4c4"
},
"htmx": {
"type": "GitRelease",
"repository": {
@ -42,10 +110,17 @@
"hash": "1wms0wxwvxac1r1daihj5wsx1nghfk5hwdvy5cpgq481bp9x4cjn"
},
"nixpkgs": {
"type": "Channel",
"name": "nixpkgs-unstable",
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.05pre782598.18dd725c2960/nixexprs.tar.xz",
"hash": "1p7kgyph7xkj57p19nbxpycmbchc6d9gwdznsmxhymrzyzi3if21"
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nixos",
"repo": "nixpkgs"
},
"branch": "nixpkgs-unstable",
"submodules": false,
"revision": "f33a4d26226c05d501b9d4d3e5e60a3a59991921",
"url": "https://github.com/nixos/nixpkgs/archive/f33a4d26226c05d501b9d4d3e5e60a3a59991921.tar.gz",
"hash": "1b6dm1sn0bdpcsmxna0zzspjaixa2dald08005fry5jrbjvwafdj"
}
},
"version": 5

View file

@ -21,11 +21,18 @@ in
pkgs.npins
manage
];
env = import ./env.nix { inherit lib pkgs; } // {
NPINS_DIRECTORY = toString ../npins;
CREDENTIALS_DIRECTORY = toString ./.credentials;
DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3";
};
env =
let
inherit (builtins) toString;
in
import ./env.nix { inherit lib pkgs; }
// {
NPINS_DIRECTORY = toString ../npins;
CREDENTIALS_DIRECTORY = toString ./.credentials;
DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3";
# locally: use a fixed relative reference, so we can use our newest files without copying to the store
REPO_DIR = toString ../.;
};
shellHook = ''
${lib.concatStringsSep "\n" (
map (file: "ln -sf ${file.from} ${toString ./src/${file.to}}") package.generated

View file

@ -3,16 +3,15 @@
pkgs,
...
}:
let
inherit (builtins) toString;
in
{
REPO_DIR = toString ../.;
# explicitly use nix, as e.g. lix does not have configurable-impure-env
BIN_PATH = lib.makeBinPath [
# explicitly use nix, as e.g. lix does not have configurable-impure-env
pkgs.nix
# nixops error maybe due to our flake git hook: executing 'git': No such file or directory
pkgs.lix
pkgs.bash
pkgs.coreutils
pkgs.openssh
pkgs.git
pkgs.jaq # tf
(import ../infra/tf.nix { inherit lib pkgs; })
];
SSH_PRIVATE_KEY_FILE = "";
}

View file

@ -29,6 +29,9 @@ let
((pkgs.formats.pythonVars { }).generate "settings.py" cfg.settings)
(builtins.toFile "extra-settings.py" cfg.extra-settings)
];
REPO_DIR = import ../../infra/tf-env.nix {
inherit lib pkgs;
};
};
python-environment = pkgs.python3.withPackages (
@ -157,9 +160,7 @@ in
};
};
users.users.${name} = {
isNormalUser = true;
};
users.users.${name}.isNormalUser = true;
users.groups.${name} = { };
systemd.services.${name} = {
@ -167,6 +168,7 @@ in
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [
pkgs.openssh
python-environment
manage-service
];

View file

@ -1,5 +1,6 @@
{
lib,
pkgs,
sqlite,
python3,
python3Packages,
@ -14,7 +15,7 @@ let
root = ../src;
fileset = intersection (gitTracked ../../.) ../src;
};
pyproject = with lib; fromTOML pyproject-toml;
pyproject = fromTOML pyproject-toml;
# TODO: define this globally
name = "panel";
# TODO: we may want this in a file so it's easier to read statically
@ -89,7 +90,13 @@ python3.pkgs.buildPythonPackage {
mkdir -p $out/bin
cp -v ${src}/manage.py $out/bin/manage.py
chmod +x $out/bin/manage.py
wrapProgram $out/bin/manage.py --prefix PYTHONPATH : "$PYTHONPATH"
wrapProgram $out/bin/manage.py \
--set REPO_DIR "${
import ../../infra/tf-env.nix {
inherit lib pkgs;
}
}" \
--prefix PYTHONPATH : "$PYTHONPATH"
${lib.concatStringsSep "\n" (
map (file: "cp ${file.from} $out/${python3.sitePackages}/${file.to}") generated
)}

View file

@ -1,3 +1,4 @@
# TODO upstream, see #248
{
lib,
buildPythonPackage,

View file

@ -10,6 +10,7 @@ For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.2/ref/settings/
"""
import re
import sys
import os
import importlib.util
@ -18,6 +19,8 @@ import dj_database_url
from os import environ as env
from pathlib import Path
STORE_PATTERN = re.compile("^/nix/store/[^/]+$")
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
@ -242,6 +245,6 @@ if user_settings_file is not None:
# PATH to expose to launch button
bin_path=env['BIN_PATH']
# path of the root flake to trigger nixops from, see #94.
# path of the root flake to deploy from
# to deploy, this should be specified; for dev, just use a relative path.
repo_dir = env["REPO_DIR"]

View file

@ -1,6 +1,8 @@
from enum import Enum
import json
from os.path import expanduser
import subprocess
import logging
import os
from django.urls import reverse_lazy
@ -19,6 +21,9 @@ from pydantic import BaseModel
from panel import models, settings
from panel.configuration import schema
logger = logging.getLogger(__name__)
class Index(TemplateView):
template_name = 'index.html'
@ -106,22 +111,23 @@ class DeploymentStatus(ConfigurationForm):
}
env = {
"PATH": settings.bin_path,
# "TF_LOG": "info",
} | {
# pass in form info to our deployment
"DEPLOYMENT": config.json()
# FIXME: ensure sensitive info is protected
f"TF_VAR_{k}": v if isinstance(v, str) else json.dumps(v) for k, v in json.loads(config.model_dump_json()).items()
}
logger.info("env: %s", env)
cwd = f"{settings.repo_dir}/infra/operator"
cmd = [
"nix",
"develop",
"--extra-experimental-features",
"configurable-impure-env",
"--command",
"nixops4",
"tofu",
# f"-chdir={cwd}",
"apply",
"test",
f"-state={cwd}/terraform.tfstate", # FIXME: separate users' state, see #313
"--auto-approve",
"-lock=false",
"-parallelism=1" # limit OOM risk
]
deployment_result = subprocess.run(
cmd,
cwd=settings.repo_dir,
env=env,
)
deployment_result = subprocess.run(cmd, cwd=cwd, env=env)
logger.debug("deployment_result: %s", deployment_result)
return deployment_result, config
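For reference, the subprocess launched here is roughly equivalent to the following shell invocation, with the form contents exported as `TF_VAR_*` variables. This is a hedged approximation, not the panel's exact command; paths and values are illustrative assumptions:

```bash
# Rough manual equivalent of DeploymentStatus's subprocess call.
# REPO_DIR and the variable values are illustrative assumptions.
cd "$REPO_DIR/infra/operator"
export TF_VAR_domain='fediversity.net'
export TF_VAR_pixelfed='{"enable": true}'
tofu apply -state="$PWD/terraform.tfstate" --auto-approve -lock=false -parallelism=1
```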

10
proxmox/.envrc Normal file
View file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# the shebang is ignored, but nice for editors
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use_nix
fi

60
proxmox/README.md Normal file
View file

@ -0,0 +1,60 @@
# Infra
## Provisioning VMs with an initial configuration
NOTE[Niols]: This is very manual and clunky for now. Two things will happen: in the
near future, I will improve the provisioning script to make this less clunky; further
on, orchestration will be able to communicate with Proxmox directly and everything
will become much cleaner. A consolidated command sketch follows the provisioning steps below.
1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
above 100. For instance, `fedi117`.
2. Add a basic configuration for the machine. These typically go in
`infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
find inspiration.
3. Add a file for each of those VMs' public keys, e.g.
```
touch keys/systems/fedi117.pub
```
Those files need to exist during provisioning, but their content matters only
when updating the machines' configuration.
FIXME: Remove this step by making the provisioning script not fail when the
public key does not exist yet.
4. Run the provisioning script:
```
sh infra/proxmox-provision.sh fedi117
```
The script can take several ids at the same time. It requires some
authentication options and provides several more. See `--help`.
5. (Optional) Add a DNS entry for the machine; for instance `fedi117.abundos.eu
A 95.215.187.117`.
6. Grab the public host keys for the machines in question, and add them to the
repository. For instance:
```
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
```
FIXME: Make the provisioning script do that for us.
7. Regenerate the list of machines:
```
sh infra/machines.md.sh
```
Commit it with the machine's configuration, public key, etc.
8. At this point, the machine contains a very basic configuration that contains
just enough for it to boot and be reachable. Go on to the next section to
update the machine and put an actual configuration.
FIXME: Figure out why the full configuration isn't on the machine at this
point and fix it.
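Putting the manual steps above together, a hedged end-to-end sketch for one machine; the name `fedi117` is just the example used throughout this README:

```bash
# Hedged end-to-end sketch of the provisioning steps above for one VM.
vm=fedi117
touch "keys/systems/${vm}.pub"                     # step 3: the key file must exist
sh infra/proxmox-provision.sh "$vm"                # step 4: provision (see --help for auth options)
ssh "${vm}.abundos.eu" 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' \
  > "keys/systems/${vm}.pub"                       # step 6: record the real host key
sh infra/machines.md.sh                            # step 7: regenerate the machine list
```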
## Removing an existing VM
See `infra/proxmox-remove.sh --help`.

23
proxmox/default.nix Normal file
View file

@ -0,0 +1,23 @@
{
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs { inherit system; },
}:
{
# shell for testing TF directly
shell = pkgs.mkShellNoCC {
packages = [
pkgs.openssh
pkgs.httpie
pkgs.jq
];
};
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}

View file

@ -15,7 +15,6 @@ let
installer =
{
config,
pkgs,
lib,
...

View file

@ -228,8 +228,7 @@ build_iso () {
nix build \
--impure --expr "
let flake = builtins.getFlake (builtins.toString ./.); in
flake.lib.makeInstallerIso {
import ./makeInstallerIso.nix {
nixosConfiguration = flake.nixosConfigurations.$vm_name;
nixpkgs = flake.inputs.nixpkgs;
$nix_host_keys

10
secrets/.envrc Normal file
View file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# the shebang is ignored, but nice for editors
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use_nix
fi

View file

@ -22,7 +22,7 @@ As an example, let us add a secret in a file “cheeses” whose content should
extension); this will open your `$EDITOR` ; enter “best ones come
unpasteurised”, save and close.
3. If you are doing something flake-related such as NixOps4, remember to commit
3. If you are doing something flake-related, remember to commit
or at least stage the secret.
4. In the machine's configuration, load our `ageSecrets` NixOS module, declare the machine's host key and start using your secrets, eg.:

Some files were not shown because too many files have changed in this diff.