Compare commits

2 commits

SHA1        Message                 Date
9ae56c802c  move private key        2025-04-14 13:04:36 +02:00
e07c6e9972  buttons works deployed  2025-04-14 11:24:40 +02:00
350 changed files with 11721 additions and 3252 deletions

.envrc
@@ -3,8 +3,8 @@
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv)"
eval "$(lorri direnv --flake .)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use_nix
use flake
fi

@@ -1,24 +0,0 @@
name: deploy-infra
on:
workflow_dispatch: # allows manual triggering
push:
branches:
- main
jobs:
deploy:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up SSH key for age secrets and SSH
run: |
env
mkdir -p ~/.ssh
echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
- name: Deploy
run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'

@@ -13,46 +13,23 @@ jobs:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build -A tests
- run: nix build .#checks.x86_64-linux.pre-commit -L
check-data-model:
check-website:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
check-mastodon:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-mastodon-service -L
- run: cd website && nix-build -A tests
- run: cd website && nix-build -A build
check-peertube:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-peertube-service -L
- run: nix build .#checks.x86_64-linux.peertube -L
check-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build -A tests.panel
check-deployment-basic:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-basic -L
check-deployment-cli:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-cli -L
check-deployment-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -L
- run: cd panel && nix-build -A tests

@@ -1,24 +0,0 @@
name: update-dependencies
on:
workflow_dispatch: # allows manual triggering
# FIXME: re-enable when manual run works
# schedule:
# - cron: '0 0 1 * *' # monthly
jobs:
lockfile:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update pins
run: nix-shell --run "npins update"
- name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
with:
remote-instance-api-version: v1
token: "${{ secrets.DEPLOY_KEY }}"
branch: npins-update
commit-message: "npins: update sources"
title: "npins: update sources"

@@ -14,12 +14,6 @@ There already exist solutions for self-hosting, but they're not suitable for wha
The ones we're aware of require substantial technical knowledge and time commitment by operators, especially for scaling to thousands of users.
Not everyone has the expertise and time to run their own server.
## Interactions
To reach these goals, we aim to implement the following interactions between [actors](#actors) (depicted with rounded corners) and system components (see the [glossary](#glossary), depicted with rectangles).
![](https://git.fediversity.eu/Fediversity/meta/raw/branch/main/architecture-docs/interactions.svg)
## Actors
- Fediversity project team
@@ -63,11 +57,11 @@ To reach these goals, we aim to implement the following interactions between [ac
- [Fediverse](https://en.wikipedia.org/wiki/Fediverse)
A collection of social networking applications that can communicate with each other using a common protocol.
A collection of social networking services that can communicate with each other using a common protocol.
- Application
- Service
User-facing software (e.g. from Fediverse) run by the hosting provider for an operator.
A Fediverse application run by the hosting provider for an operator.
- Configuration
@@ -79,11 +73,11 @@ To reach these goals, we aim to implement the following interactions between [ac
Make a resource, such as a virtual machine, available for use.
> Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for applications run by operators.
> Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for services run by operators.
- Deploy
Put software, such as applications, onto computers.
Put software, such as services, onto computers.
The software includes technical configuration that links software components.
Most user-facing configuration remains untouched by the deployment process.
@@ -91,7 +85,7 @@ To reach these goals, we aim to implement the following interactions between [ac
- Migrate
Move service configurations and deployment state, including user data, from one hosting provider to another.
Move service configurations and user data to a different hosting provider.
- [NixOps4](https://github.com/nixops4/nixops4)
@@ -109,18 +103,6 @@ To reach these goals, we aim to implement the following interactions between [ac
> Example: We need a resource provider for obtaining deployment secrets from a database.
- Runtime backend
A type of digital environment one can run operating systems such as NixOS on, e.g. bare-metal, a hypervisor, or a container runtime.
- Runtime environment
The thing a deployment runs on, an interface against which the deployment is working. See runtime backend.
- Runtime config
Configuration logic specific to a runtime backend, e.g. how to deploy, how to access object storage.
## Development
All the code made for this project is freely licenced under [EUPL](https://en.m.wikipedia.org/wiki/European_Union_Public_Licence).
@@ -154,3 +136,6 @@ details as to what they are for. As an overview:
- [`services/`](./services) contains our effort to make Fediverse applications
work seamlessly together in our specific setting.
- [`website/`](./website) contains the framework and the content of [the
Fediversity website](https://fediversity.eu/)

@@ -1,85 +0,0 @@
{
system ? builtins.currentSystem,
sources ? import ./npins,
pkgs ? import sources.nixpkgs { inherit system; },
}:
let
inherit (sources)
nixpkgs
git-hooks
gitignore
;
inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4;
panel = import ./panel { inherit sources system; };
pre-commit-check =
(import "${git-hooks}/nix" {
inherit nixpkgs system;
gitignore-nix-src = {
lib = import gitignore { inherit lib; };
};
}).run
{
src = ./.;
hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [
"npins"
];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
};
in
{
# shell for testing TF directly
shell = pkgs.mkShellNoCC {
inherit (pre-commit-check) shellHook;
buildInputs = pre-commit-check.enabledPackages;
packages =
let
test-loop = pkgs.writeShellApplication {
name = "test-loop";
runtimeInputs = [
pkgs.watchexec
pkgs.nix-unit
];
text = ''
watchexec -w ${builtins.toString ./.} -- nix-unit ${builtins.toString ./deployment/data-model-test.nix} "$@"
'';
};
in
[
pkgs.npins
pkgs.nil
(pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
pkgs.nix-unit
test-loop
nixops4.packages.${system}.default
];
};
tests = {
inherit pre-commit-check;
panel = panel.tests;
};
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}

@@ -1,123 +1,6 @@
# Deployment
This directory contains work to generate a full Fediversity deployment from a minimal configuration.
This is different from [`../services/`](../services) that focuses on one machine, providing a polished and unified interface to different Fediverse services.
## Data model
The core piece of the project is the [Fediversity data model](./data-model.nix), which describes all entities and their interactions.
What can be done with it is exemplified in the [evaluation tests](./data-model-test.nix).
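As a rough sketch of the model's shape (adapted from the evaluation tests that appear further down in this diff, and assuming `lib` and `pkgs` are in scope), an application declares a description, an operator-facing options module, and an implementation that maps a configuration to deployment resources:

``` nix
applications.hello =
  { ... }:
  {
    description = ''Command-line tool that will print "Hello, world!" on the terminal'';
    module =
      { ... }:
      {
        options = {
          enable = lib.mkEnableOption "Hello in the shell";
        };
      };
    implementation =
      cfg:
      lib.optionalAttrs cfg.enable {
        dummy.login-shell.packages.hello = pkgs.hello;
      };
  };
```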
Run `test-loop` in the development environment when hacking on the data model or adding tests.
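`test-loop` (defined in the development shell) watches the repository and re-runs the evaluation tests on every change; a one-off run amounts to roughly:

``` console
$ nix-unit ./deployment/data-model-test.nix
```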
## Checks
There are three levels of deployment checks: `basic`, `cli`, `panel`.
They can be found in subdirectories of [`check/`](./check).
They can be run as part of `nix flake check` or individually as:
``` console
$ nix build .#checks.<system>.deployment-<name> -vL
```
Since `nixops4 apply` operates on a flake, the tests take this repository's flake as a template.
This is also why there are some dummy files that will be overwritten inside the test.
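For example, on an `x86_64-linux` machine, this corresponds to:

``` console
$ nix build .#checks.x86_64-linux.deployment-basic -L
$ nix build .#checks.x86_64-linux.deployment-cli -L
$ nix build .#checks.x86_64-linux.deployment-panel -L
```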
### Basic deployment check
The basic deployment check is here as a building block and sanity check.
It does not actually use any of the code in this directory but checks that our test strategy is sound and that basic NixOps4 functionality is in place.
It is a NixOS test featuring one deployer machine and two target machines.
The deployment simply adds `pkgs.hello` to one and `pkgs.cowsay` to the other.
It is heavily inspired by [a similar test in `nixops4-nixos`].
[a similar test in `nixops4-nixos`]: https://github.com/nixops4/nixops4-nixos/blob/main/test/default/nixosTest.nix
This test involves three nodes:
- `deployer` is the node that will perform the deployment using `nixops4 apply`.
Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.
- “target machines” are two eponymous nodes on which the packages `hello` and `cowsay` will be deployed.
They start with a minimal configuration.
``` mermaid
flowchart LR
deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]
subgraph target_machines["target machines"]
direction TB
hello
cowsay
end
deployer -->|deploys| target_machines
```
### Service deployment check using `nixops4 apply`
This check omits the panel by running a direct invocation of NixOps4.
It deploys some services and checks that they are indeed on the target machines, then cleans them up and checks whether that works, too.
It builds upon the basic deployment check.
This test involves seven nodes:
- `deployer` is the node that will perform the deployment using `nixops4 apply`.
Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.
- “target machines” are four nodes — `garage`, `mastodon`, `peertube`, and `pixelfed` — on which the services will be deployed.
They start with a minimal configuration.
- `acme` is a node that runs [Pebble], a miniature ACME server to deliver the certificates that the services expect.
- [WIP] `client` is a node that runs a browser controlled by some Selenium scripts in order to check that the services are indeed running and are accessible.
[Pebble]: https://github.com/letsencrypt/pebble
``` mermaid
flowchart LR
classDef invisible fill:none,stroke:none
subgraph left [" "]
direction TB
deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]
client["client<br><font size='1'>Selenium scripts</font>"]
end
subgraph middle [" "]
subgraph target_machines["target machines"]
direction TB
garage
mastodon
peertube
pixelfed
end
end
subgraph right [" "]
direction TB
acme["acme<br><font size='1'>runs Pebble</font>"]
end
left ~~~ middle ~~~ right
class left,middle,right invisible
deployer -->|deploys| target_machines
client -->|tests| mastodon
client -->|tests| peertube
client -->|tests| pixelfed
target_machines -->|get certs| acme
```
### Service deployment check from the FediPanel
This is a full deployment check: it runs the [FediPanel](../panel) on the deployer machine, deploys some services through it, checks that they are indeed on the target machines, then cleans them up and checks whether that works, too.
It builds upon the basic and CLI deployment checks, the only difference being that `deployer` runs NixOps4 only indirectly via the panel, and the `client` node is the one that triggers the deployment via a browser, the way a human would.
This repository contains work to generate a full Fediversity deployment from a
minimal configuration. This is different from [`../services/`](../services) that
focuses on one machine, providing a polished and unified interface to different
Fediverse services.

@@ -1,8 +0,0 @@
{
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
}

@@ -1,14 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}

@@ -1,36 +0,0 @@
{
inputs,
sources,
lib,
providers,
...
}:
let
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
in
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = lib.genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
}

@@ -1,22 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-basic = {
imports = [ ./deployment/check/basic/deployment.nix ];
_module.args = { inherit inputs sources; };
};
}
);
}

@@ -1,48 +0,0 @@
{ inputs, lib, ... }:
{
_class = "nixosTest";
name = "deployment-basic";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${pkgs.system}.default
];
# FIXME: sad times
system.extraDependencies = with pkgs; [
jq
jq.inputDerivation
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
cowsay
];
};
};
extraTestScript = ''
with subtest("Check the status before deployment"):
hello.fail("hello 1>&2")
cowsay.fail("cowsay 1>&2")
with subtest("Run the deployment"):
deployer.succeed("nixops4 apply check-deployment-basic --show-trace --no-interactive 1>&2")
with subtest("Check the deployment"):
hello.succeed("hello 1>&2")
cowsay.succeed("cowsay hi 1>&2")
'';
}

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

@@ -1,59 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
}

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments = import ./deployment/check/cli/deployments.nix {
inherit inputs sources lib;
};
}
);
}

@@ -1,137 +0,0 @@
{
inputs,
hostPkgs,
lib,
...
}:
let
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "";
in
{
_class = "nixosTest";
name = "deployment-cli";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployments.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in panel test)
../../default.nix
../../options.nix
../../configuration.sample.json
../../../services/fediversity
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${pkgs.system}.default
];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not type
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The tests for the presence of the services are very simple: we only
## check that there is a systemd service with the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-mastodon-pixelfed --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-peertube --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

@@ -1,106 +0,0 @@
{
inputs,
lib,
pkgs,
config,
sources,
...
}:
let
inherit (lib)
mkOption
mkForce
concatLists
types
;
in
{
_class = "nixos";
imports = [ ./sharedOptions.nix ];
options.system.extraDependenciesFromModule = mkOption {
type = types.deferredModule;
description = ''
Grab the derivations needed to build the given module and dump them in
system.extraDependencies. You want to put in this module a superset of
all the things that you will need on your target machines.
NOTE: This will work as long as the union of all these configurations does
not have conflicts that would prevent evaluation.
'';
default = { };
};
config = {
virtualisation = {
## NOTE: The deployer machine needs more RAM and disk than the
## default. These values have been trimmed down to the gigabyte.
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4 * 1024;
diskSize = 4 * 1024;
cores = 2;
};
nix.settings = {
substituters = mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
extra-experimental-features = "flakes";
};
system.extraDependencies =
[
inputs.nixops4
inputs.nixops4-nixos
inputs.nixpkgs
sources.flake-parts
sources.flake-inputs
sources.git-hooks
pkgs.stdenv
pkgs.stdenvNoCC
]
++ (
let
## We build a whole NixOS system that contains the module
## `system.extraDependenciesFromModule`, only to grab its
## configuration and the store paths needed to build it and
## dump them in `system.extraDependencies`.
machine =
(pkgs.nixos [
./targetNode.nix
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.acmeNodeIP;
}
]).config;
in
[
machine.system.build.toplevel.inputDerivation
machine.system.build.etc.inputDerivation
machine.system.build.etcBasedir.inputDerivation
machine.system.build.etcMetadataImage.inputDerivation
machine.system.build.extraUtils.inputDerivation
machine.system.path.inputDerivation
machine.system.build.setEnvironment.inputDerivation
machine.system.build.vm.inputDerivation
machine.system.build.bootStage1.inputDerivation
machine.system.build.bootStage2.inputDerivation
]
++ concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) machine.environment.etc
)
);
};
}

@@ -1,200 +0,0 @@
{
inputs,
lib,
config,
hostPkgs,
sources,
...
}:
let
inherit (builtins)
concatStringsSep
toJSON
;
inherit (lib)
types
fileset
mkOption
genAttrs
attrNames
optionalString
;
inherit (hostPkgs)
runCommandNoCC
writeText
system
;
forConcat = xs: f: concatStringsSep "\n" (map f xs);
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
in
{
_class = "nixosTest";
imports = [
./sharedOptions.nix
];
options = {
## FIXME: I wish I could just use `testScript` but with something like
## `mkOrder` to put this module's string before something else.
extraTestScript = mkOption { };
sourceFileset = mkOption {
## REVIEW: Upstream to nixpkgs?
type = types.mkOptionType {
name = "fileset";
description = "fileset";
descriptionClass = "noun";
check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
merge = (_: defs: fileset.unions (map (x: x.value) defs));
};
description = ''
A fileset that will be copied to the deployer node in the current
working directory. This should contain all the files that are
necessary to run that particular test, such as the NixOS
modules necessary to evaluate a deployment.
'';
};
};
config = {
sourceFileset = fileset.unions [
# NOTE: not the flake itself; it will be overridden.
../../../mkFlake.nix
../../../flake.lock
../../../npins
./sharedOptions.nix
./targetNode.nix
./targetResource.nix
(config.pathToCwd + "/flake-under-test.nix")
];
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
nodes =
{
deployer = {
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
};
}
//
(
if config.enableAcme then
{
acme = {
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
## break things once we play with DNS?
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
## We aren't testing ACME - we just want certificates.
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
};
}
else
{ }
)
//
genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
testScript = ''
${forConcat (attrNames config.nodes) (n: ''
${n}.start()
'')}
${forConcat (attrNames config.nodes) (n: ''
${n}.wait_for_unit("multi-user.target")
'')}
## A subset of the repository that is necessary for this test. It will be
## copied inside the test. The smaller this set, the faster our CI, because we
## won't need to re-run when things change outside of it.
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${
fileset.toSource {
root = ../../..;
fileset = config.sourceFileset;
}
}/* .")
with subtest("Configure the network"):
${forConcat config.targetMachines (
tm:
let
targetNetworkJSON = writeText "target-network.json" (
toJSON config.nodes.${tm}.system.build.networkConfig
);
in
''
deployer.copy_from_host("${targetNetworkJSON}", "${config.pathFromRoot}/${tm}-network.json")
''
)}
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
${forConcat config.targetMachines (tm: ''
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
'')}
with subtest("Configure the target host key"):
${forConcat config.targetMachines (tm: ''
host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
'')}
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing to grab things directly from the host's store.
##
## NOTE: We use the repository as-is (cf `src` above), overriding only
## `flake.nix` by our `flake-under-test.nix`. We also override the flake
## lock file to use locally available inputs, as we cannot download them.
##
with subtest("Override the flake and its lock"):
deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
deployer.succeed("""
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${
inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
;
""")
${optionalString config.enableAcme ''
with subtest("Set up handmade DNS"):
deployer.succeed("echo '${config.nodes.acme.networking.primaryIPAddress}' > ${config.pathFromRoot}/acme_server_ip")
''}
${config.extraTestScript}
'';
};
}

@@ -1,68 +0,0 @@
/**
This file contains options shared by various components of the integration test, i.e. deployment resources, test nodes, target configurations, etc.
All these components are declared as modules, but they are part of different evaluations, which is why the options in this file can't be shared "directly".
Instead, each component imports this module and the same values are set for each of them from a common call site.
Not all components will use all the options, which allows not setting all the values.
*/
{ config, lib, ... }:
let
inherit (lib) mkOption types;
in
# `config` not set and imported from multiple places: no fixed module class
{
options = {
targetMachines = mkOption {
type = with types; listOf str;
description = ''
Names of the nodes in the NixOS test that are target machines. This is
used by the infrastructure to extract their network configuration, among
other things, and re-import it in the deployment.
'';
};
pathToRoot = mkOption {
type = types.path;
description = ''
Path from the location of the working directory to the root of the
repository.
'';
};
pathFromRoot = mkOption {
type = types.path;
description = ''
Path from the root of the repository to the working directory.
'';
apply = x: lib.path.removePrefix config.pathToRoot x;
};
pathToCwd = mkOption {
type = types.path;
description = ''
Path to the current working directory. This is a shortcut for
pathToRoot/pathFromRoot.
'';
default = config.pathToRoot + "/${config.pathFromRoot}";
};
enableAcme = mkOption {
type = types.bool;
description = ''
Whether to enable ACME in the NixOS test. This will add an ACME server
to the node and connect all the target machines to it.
'';
default = false;
};
acmeNodeIP = mkOption {
type = types.str;
description = ''
The IP of the ACME node in the NixOS test. This option will be set
during the test to the correct value.
'';
};
};
}

@@ -1,67 +0,0 @@
{
inputs,
config,
lib,
modulesPath,
...
}:
let
testCerts = import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix";
inherit (lib) mkIf mkMerge;
in
{
_class = "nixos";
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
config = mkMerge [
{
## Test framework disables switching by default. That might be OK by itself,
## but we also use this config for getting the dependencies in
## `deployer.system.extraDependencies`.
system.switch.enable = true;
nix = {
## Not used; save a large copy operation
channel.enable = false;
registry = lib.mkForce { };
};
services.openssh = {
enable = true;
settings.PermitRootLogin = "yes";
};
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
}
(mkIf config.enableAcme {
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
## NOTE: This certificate is the one used by the Pebble HTTPS server.
## This is NOT the root CA of the Pebble server. We do add it here so
## that Pebble clients can talk to its API, but this will not allow
## those machines to verify generated certificates.
testCerts.ca.cert
];
## FIXME: it is a bit sad that all this logistics is necessary. look into
## better DNS stuff
networking.extraHosts = "${config.acmeNodeIP} acme.test";
})
];
}

@@ -1,51 +0,0 @@
{
inputs,
lib,
config,
sources,
...
}:
let
inherit (builtins) readFile;
inherit (lib) trim mkOption types;
in
{
_class = "nixops4Resource";
imports = [ ./sharedOptions.nix ];
options = {
nodeName = mkOption {
type = types.str;
description = ''
The name of the node in the NixOS test;
needed for recovering the node configuration to prepare its deployment.
'';
};
};
config = {
ssh = {
host = config.nodeName;
hostPublicKey = readFile (config.pathToCwd + "/${config.nodeName}_host_key.pub");
};
nixpkgs = inputs.nixpkgs;
nixos.module = {
imports = [
./targetNode.nix
(lib.modules.importJSON (config.pathToCwd + "/${config.nodeName}-network.json"))
];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = trim (readFile (config.pathToCwd + "/acme_server_ip"));
nixpkgs.hostPlatform = "x86_64-linux";
};
};
}

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

@@ -1,58 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
args;
in
makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env == "" then
throw "The DEPLOYMENT environment needs to be set. You do not want to use this deployment unless in the `deployment-panel` NixOS test."
else
env
)
)

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-panel = import ./deployment/check/panel/deployment.nix {
inherit inputs sources lib;
};
}
);
}

@@ -1,377 +0,0 @@
{
inputs,
lib,
hostPkgs,
config,
...
}:
let
inherit (lib)
getExe
;
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "dummy";
panelPort = 8000;
panelUser = "test";
panelEmail = "test@test.com";
panelPassword = "ouiprdaaa43"; # panel's manager complains if too close to username or email
fediUser = "test";
fediEmail = "test@test.com";
fediPassword = "testtest";
fediName = "Testy McTestface";
toPythonBool = b: if b then "True" else "False";
interactWithPanel =
{
baseUri,
enableMastodon,
enablePeertube,
enablePixelfed,
}:
hostPkgs.writers.writePython3Bin "interact-with-panel"
{
libraries = with hostPkgs.python3Packages; [ selenium ];
flakeIgnore = [
"E302" # expected 2 blank lines, found 0
"E303" # too many blank lines
"E305" # expected 2 blank lines after end of function or class
"E501" # line too long (> 79 characters)
"E731" # do not assign lambda expression, use a def
];
}
''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
print("Create and configure driver...")
options = Options()
options.add_argument("--headless")
options.binary_location = "${getExe hostPkgs.firefox-unwrapped}"
service = webdriver.FirefoxService(executable_path="${getExe hostPkgs.geckodriver}")
driver = webdriver.Firefox(options=options, service=service)
driver.set_window_size(1280, 960)
driver.implicitly_wait(360)
driver.command_executor.set_timeout(3600)
print("Open login page...")
driver.get("${baseUri}/login/")
print("Enter username...")
driver.find_element(By.XPATH, "//input[@name = 'username']").send_keys("${panelUser}")
print("Enter password...")
driver.find_element(By.XPATH, "//input[@name = 'password']").send_keys("${panelPassword}")
print("Click Login button...")
driver.find_element(By.XPATH, "//button[normalize-space() = 'Login']").click()
print("Open configuration page...")
driver.get("${baseUri}/configuration/")
# Helpers to actually set and not add or switch input values.
def input_set(elt, keys):
elt.clear()
elt.send_keys(keys)
def checkbox_set(elt, new_value):
if new_value != elt.is_selected():
elt.click()
print("Enable Fediversity...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'enable']"), True)
print("Fill in initialUser info...")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.username']"), "${fediUser}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.password']"), "${fediPassword}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.email']"), "${fediEmail}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.displayName']"), "${fediName}")
print("Enable services...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})
print("Start deployment...")
driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()
print("Wait for deployment status to show up...")
get_deployment_result = lambda d: d.find_element(By.XPATH, "//div[@id = 'deployment-result']//p")
WebDriverWait(driver, timeout=3660, poll_frequency=10).until(get_deployment_result)
deployment_result = get_deployment_result(driver).get_attribute('innerHTML')
print("Quit...")
driver.quit()
match deployment_result:
case 'Deployment Succeeded':
print("Deployment has succeeded; exiting normally")
exit(0)
case 'Deployment Failed':
print("Deployment has failed; exiting with return code `1`")
exit(1)
case _:
print(f"Unexpected deployment result: {deployment_result}; exiting with return code `2`")
exit(2)
'';
in
{
_class = "nixosTest";
name = "deployment-panel";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in CLI test)
../../default.nix
../../options.nix
../../../services/fediversity
];
## The panel's module sets `nixpkgs.overlays` which clashes with
## `pkgsReadOnly`. We disable it here.
node.pkgsReadOnly = false;
nodes.deployer =
{ pkgs, ... }:
{
imports = [
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;
domain = "deployer";
secrets = {
SECRET_KEY = dummyFile;
};
port = panelPort;
deployment = {
flake = "/run/fedipanel/flake";
name = "check-deployment-panel";
};
};
environment.systemPackages = [ pkgs.expect ];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy # a configuration checker for nginx
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not type
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
nodes.client =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
httpie
dnsutils # for `dig`
openssl
cacert
wget
python3
python3Packages.selenium
firefox-unwrapped
geckodriver
];
security.pki.certificateFiles = [
config.nodes.acme.test-support.acme.caCert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The tests for the presence of the services are very simple: we only
## check that there is a systemd service with the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
## TODO: We want a nicer way to control where the FediPanel consumes its
## flake, which can default to the store but could also be somewhere else if
## someone wanted to change the code of the flake.
##
with subtest("Give the panel access to the flake"):
deployer.succeed("mkdir /run/fedipanel /run/fedipanel/flake >&2")
deployer.succeed("cp -R . /run/fedipanel/flake >&2")
deployer.succeed("chown -R panel:panel /run/fedipanel >&2")
## TODO: I want a programmatic way to provide an SSH key to the panel (and
## therefore NixOps4). This should perhaps happen in the Python code, but
## maybe it is fair that that one picks up on the user's key? It could
## also be in the Nix packaging.
##
with subtest("Set up the panel's SSH keys"):
deployer.succeed("mkdir /home/panel/.ssh >&2")
deployer.succeed("cp -R /root/.ssh/* /home/panel/.ssh >&2")
deployer.succeed("chown -R panel:panel /home/panel/.ssh >&2")
deployer.succeed("chmod 600 /home/panel/.ssh/* >&2")
## TODO: This is a hack to accept the root CA used by Pebble on the client
## machine. Pebble randomizes everything, so the only way to get it is to
## call the /roots/0 endpoint at runtime, leaving not much margin for a nice
## Nixy way of adding the certificate. There is no way around it as this is
## by design in Pebble, showing in fact that Pebble was not the appropriate
## tool for our use and that nixpkgs does not in fact provide an easy way to
## generate _usable_ certificates in NixOS tests. I suggest we merge this,
## and track the task to set it up in a cleaner way. I would tackle this in
## a subsequent PR, and hopefully even contribute this BetterWay(tm) to
## nixpkgs. — Niols
##
with subtest("Set up ACME root CA on client"):
client.succeed("""
cd /etc/ssl/certs
curl -o pebble-root-ca.pem https://acme.test:15000/roots/0
curl -o pebble-intermediate-ca.pem https://acme.test:15000/intermediates/0
{ cat ca-bundle.crt
cat pebble-root-ca.pem
cat pebble-intermediate-ca.pem
} > new-ca-bundle.crt
rm ca-bundle.crt ca-certificates.crt
mv new-ca-bundle.crt ca-bundle.crt
ln -s ca-bundle.crt ca-certificates.crt
""")
## TODO: I would hope for a more declarative way to add users. This should
## be handled by the Nix packaging of the FediPanel. — Niols
##
with subtest("Create panel user"):
deployer.succeed("""
expect -c '
spawn manage createsuperuser --username ${panelUser} --email ${panelEmail}
expect "Password: "; send "${panelPassword}\\n";
expect "Password (again): "; send "${panelPassword}\\n"
interact
' >&2
""")
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = false;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = true;
enablePeertube = false;
enablePixelfed = true;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = true;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

@@ -1,70 +0,0 @@
let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit inputs;
};
modules = [
module
./data-model.nix
];
}).config;
in
{
_class = "nix-unit";
test-eval = {
expr =
let
fediversity = eval (
{ config, ... }:
{
config = {
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options = {
enable = lib.mkEnableOption "Hello in the shell";
};
};
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
};
};
options = {
example-configuration = mkOption {
type = config.configuration;
readOnly = true;
default = {
enable = true;
applications.hello.enable = true;
};
};
};
}
);
in
{
inherit (fediversity)
example-configuration
;
};
expected = {
example-configuration = {
enable = true;
applications.hello.enable = true;
};
};
};
}

@@ -1,89 +0,0 @@
{
lib,
config,
...
}:
let
inherit (lib) mkOption types;
inherit (lib.types)
attrsOf
attrTag
deferredModuleWith
submodule
optionType
functionTo
;
functionType = import ./function.nix;
application-resources = {
options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
);
};
};
in
{
_class = "nixops4Deployment";
options = {
applications = mkOption {
description = "Collection of Fediversity applications";
type = attrsOf (
submodule (application: {
_class = "fediversity-application";
options = {
description = mkOption {
description = "Description to be shown in the application overview";
type = types.str;
};
module = mkOption {
description = "Operator-facing configuration options for the application";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-application-config"; } ]; };
};
implementation = mkOption {
description = "Mapping of application configuration to deployment resources, a description of what an application needs to run";
type = application.config.config-mapping.function-type;
};
resources = mkOption {
description = "Compute resources required by an application";
type = functionTo application.config.config-mapping.output-type;
readOnly = true;
default = input: (application.config.implementation input).output;
};
config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType;
readOnly = true;
default = {
input-type = application.config.module;
output-type = application-resources;
};
};
};
})
);
};
configuration = mkOption {
description = "Configuration type declaring options to be set by operators";
type = optionType;
readOnly = true;
default = submodule {
options = {
enable = lib.mkEnableOption {
description = "your Fediversity configuration";
};
applications = lib.mapAttrs (
_name: application:
mkOption {
description = application.description;
type = submodule application.module;
default = { };
}
) config.applications;
};
};
};
};
}

@@ -33,186 +33,149 @@
## information coming from the FediPanel.
##
## FIXME: lock step the interface with the definitions in the FediPanel
panelConfigNullable:
panelConfig:
let
inherit (lib) mkIf;
## The convertor from module options to JSON schema does not generate proper
## JSON schema types, forcing us to use nullable fields for default values.
## However, working with those fields in the deployment code is annoying (and
## unusual for Nix programmers), so we sanitize the input here and add back
## the default value by hand.
nonNull = x: v: if x == null then v else x;
panelConfig = {
domain = nonNull panelConfigNullable.domain "fediversity.net";
initialUser = nonNull panelConfigNullable.initialUser {
displayName = "Testy McTestface";
username = "test";
password = "testtest";
email = "test@test.com";
};
mastodon = nonNull panelConfigNullable.mastodon { enable = false; };
peertube = nonNull panelConfigNullable.peertube { enable = false; };
pixelfed = nonNull panelConfigNullable.pixelfed { enable = false; };
};
in
## Regular arguments of a NixOps4 deployment module.
{ config, providers, ... }:
{ providers, ... }:
let
cfg = config.deployment;
in
{
_class = "nixops4Deployment";
providers = { inherit (nixops4.modules.nixops4Provider) local; };
options = {
deployment = lib.mkOption {
description = ''
Configuration to be deployed
'';
# XXX(@fricklerhandwerk):
# misusing this will produce obscure errors that will be truncated by NixOps4
type = lib.types.submodule ./options.nix;
default = panelConfig;
};
};
config = {
providers = { inherit (nixops4.modules.nixops4Provider) local; };
resources =
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
makeConfigurationResource = resourceModule: config: {
type = providers.local.exec;
imports = [
nixops4-nixos.modules.nixops4Resource.nixos
resourceModule
{
## NOTE: With NixOps4, there are several levels and all of them live
## in the NixOS module system:
##
## 1. Each NixOps4 deployment is a module.
## 2. Each NixOps4 resource is a module. This very comment is
## inside an attrset imported as a module in a resource.
## 3. Each NixOps4 'configuration' resource contains an attribute
## 'nixos.module', itself a NixOS configuration module.
nixos.module =
{ ... }:
{
imports = [
config
fediversity
];
};
}
];
resources =
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
in
makeConfigurationResource = resourceModule: config: {
type = providers.local.exec;
imports = [
nixops4-nixos.modules.nixops4Resource.nixos
resourceModule
{
garage-configuration = makeConfigurationResource garageConfigurationResource (
{ pkgs, ... }:
mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable) {
fediversity = {
inherit (cfg) domain;
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
{
## NOTE: With NixOps4, there are several levels and all of them live
## in the NixOS module system:
##
## 1. Each NixOps4 deployment is a module.
## 2. Each NixOps4 resource is a module. This very comment is
## inside an attrset imported as a module in a resource.
## 3. Each NixOps4 'configuration' resource contains an attribute
## 'nixos.module', itself a NixOS configuration module.
nixos.module =
{ ... }:
{
imports = [
config
fediversity
];
};
}
);
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
{ pkgs, ... }:
mkIf cfg.mastodon.enable {
fediversity = {
inherit (cfg) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
};
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
}
);
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
{ pkgs, ... }:
mkIf cfg.peertube.enable {
fediversity = {
inherit (cfg) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
};
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
enable = true;
## NOTE: Only ever used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
};
};
}
);
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
{ pkgs, ... }:
mkIf cfg.pixelfed.enable {
fediversity = {
inherit (cfg) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
};
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
enable = true;
};
};
}
);
];
};
};
in
{
garage-configuration = makeConfigurationResource garageConfigurationResource (
{ pkgs, ... }:
mkIf (panelConfig.mastodon.enable || panelConfig.peertube.enable || panelConfig.pixelfed.enable) {
fediversity = {
inherit (panelConfig) domain;
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
}
);
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.mastodon.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
}
);
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.peertube.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
enable = true;
## NOTE: Only ever used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
};
};
}
);
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.pixelfed.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
enable = true;
};
};
}
);
};
}


@ -1,26 +0,0 @@
{ inputs, sources, ... }:
{
_class = "flake";
perSystem =
{ pkgs, ... }:
{
checks = {
deployment-basic = import ./check/basic {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-cli = import ./check/cli {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-panel = import ./check/panel {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
};
};
}


@ -1,37 +0,0 @@
/**
Modular function type
*/
{ config, lib, ... }:
let
inherit (lib) mkOption types;
inherit (types)
deferredModule
submodule
functionTo
optionType
;
in
{
options = {
input-type = mkOption {
type = deferredModule;
};
output-type = mkOption {
type = deferredModule;
};
function-type = mkOption {
type = optionType;
readOnly = true;
default = functionTo (submodule {
options = {
input = mkOption {
type = submodule config.input-type;
};
output = mkOption {
type = submodule config.output-type;
};
};
});
};
};
}
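For orientation, a minimal sketch of how this modular function type might be consumed (the option name `greet` and its fields are hypothetical, and the module above is assumed to be importable as `function.nix`):

```nix
{ config, lib, ... }:
{
  imports = [ ./function.nix ];
  options.greet = lib.mkOption {
    ## functions from the `input` submodule to the `output` submodule
    type = config.function-type;
  };
  config = {
    input-type.options.name = lib.mkOption { type = lib.types.str; };
    output-type.options.greeting = lib.mkOption { type = lib.types.str; };
    ## the merged value is applied as a function:
    ## (config.greet { }).output.greeting == "hello, world"
    greet = _: {
      input.name = "world";
      output.greeting = "hello, world";
    };
  };
}
```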


@ -1,104 +0,0 @@
/**
Deployment options as to be presented in the front end.
These are converted to JSON schema in order to generate front-end forms etc.
For this to work, options must not have types `functionTo` or `package`, and must not access `config` for their default values.
The options are written in a cumbersome way because the JSON schema converter can't evaluate a submodule option's default value, so all of those defaults must be set to `null`.
This could be fixed by making the converter aware of [`$defs`], but that would likely amount to half a rewrite.
[`$defs`]: https://json-schema.org/understanding-json-schema/structuring#defs
*/
{
lib,
...
}:
let
inherit (lib) types mkOption;
in
{
_class = "nixops4Deployment";
options = {
enable = lib.mkEnableOption "Fediversity configuration";
domain = mkOption {
type =
with types;
enum [
"fediversity.net"
];
description = ''
Apex domain under which the services will be deployed.
'';
default = "fediversity.net";
};
pixelfed = mkOption {
description = ''
Configuration for the Pixelfed service
'';
type =
with types;
nullOr (submodule {
options = {
enable = lib.mkEnableOption "Pixelfed";
};
});
default = null;
};
peertube = mkOption {
description = ''
Configuration for the PeerTube service
'';
type =
with types;
nullOr (submodule {
options = {
enable = lib.mkEnableOption "Peertube";
};
});
default = null;
};
mastodon = mkOption {
description = ''
Configuration for the Mastodon service
'';
type =
with types;
nullOr (submodule {
options = {
enable = lib.mkEnableOption "Mastodon";
};
});
default = null;
};
initialUser = mkOption {
description = ''
Some services require an initial user to access them.
This option sets the credentials for such an initial user.
'';
type =
with types;
nullOr (submodule {
options = {
displayName = mkOption {
type = types.str;
description = "Display name of the user";
};
username = mkOption {
type = types.str;
description = "Username for login";
};
email = mkOption {
type = types.str;
description = "User's email address";
};
password = mkOption {
type = types.str;
description = "Password for login";
};
};
});
default = null;
};
};
}
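For reference, the [`$defs`] approach alluded to above would let the converter emit each submodule schema once and reference it, so option defaults would no longer have to be `null`. A hand-written sketch of such output (not what the current converter produces):

```json
{
  "$defs": {
    "initialUser": {
      "type": "object",
      "properties": {
        "displayName": { "type": "string" },
        "username": { "type": "string" },
        "email": { "type": "string" },
        "password": { "type": "string" }
      }
    }
  },
  "properties": {
    "initialUser": { "$ref": "#/$defs/initialUser" }
  }
}
```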

815
flake.lock generated

File diff suppressed because it is too large.

106
flake.nix

@ -1,52 +1,78 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
home-manager.url = "github:nix-community/home-manager";
home-manager.inputs.nixpkgs.follows = "nixpkgs";
agenix.url = "github:ryantm/agenix";
disko.url = "github:nix-community/disko";
nixops4.url = "github:nixops4/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
"${sources.git-hooks}/flake-module.nix"
inputs.nixops4.modules.flake.default
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
./deployment/flake-part.nix
./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
];
imports = [
inputs.git-hooks.flakeModule
inputs.nixops4.modules.flake.default
perSystem =
{
pkgs,
lib,
system,
...
}:
{
checks = {
panel = (import ./. { inherit sources system; }).tests.panel.basic;
./infra/flake-part.nix
./services/flake-part.nix
];
perSystem =
{
config,
pkgs,
lib,
inputs',
...
}:
{
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [
"npins"
"launch/.terraform"
];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
devShells.default = pkgs.mkShell {
packages = [
pkgs.nil
inputs'.agenix.packages.default
pkgs.openssh
pkgs.httpie
pkgs.jq
# exposing this env var as a hack to pass info in from form
(inputs'.nixops4.packages.default.overrideAttrs {
impureEnvVars = [ "DEPLOYMENT" ];
})
];
shellHook = config.pre-commit.installationScript;
};
}
);
};
};
}


@ -1,19 +1,20 @@
# Infra
This directory contains the definition of [the VMs](../machines/machines.md) that host our
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.
## Provisioning VMs with an initial configuration
> NOTE[Niols]: This is still very manual and clunky. Two things will happen:
> 1. In the near future, I will improve the provisioning script to make this a bit less clunky.
> 2. In the far future, NixOps4 will be able to communicate with Proxmox directly and everything will become much cleaner.
NOTE[Niols]: This is very manual and clunky. Two things will happen. In the near
future, I will improve the provisioning script to make this a bit less clunky.
In the far future, NixOps4 will be able to communicate with Proxmox directly and
everything will become much cleaner.
1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
above 100. For instance, `fedi117`.
2. Add a basic configuration for the machine. These typically go in
`machines/dev/<name>/default.nix`. You can look at other `fediXXX` VMs to
`infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
find inspiration. You probably do not need a `nixos.module` option at this
point.
@ -24,7 +25,8 @@ infrastructure.
Those files need to exist during provisioning, but their content matters only
when updating the machines' configuration.
> FIXME: Remove this step by making the provisioning script not fail when the public key does not exist yet.
FIXME: Remove this step by making the provisioning script not fail when the
public key does not exist yet.
3. Run the provisioning script:
```
@ -42,11 +44,11 @@ infrastructure.
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
```
> FIXME: Make the provisioning script do that for us.
FIXME: Make the provisioning script do that for us.
7. Regenerate the list of machines:
```
sh machines/machines.md.sh
sh infra/machines.md.sh
```
Commit it with the machine's configuration, public key, etc.
@ -54,7 +56,7 @@ infrastructure.
just enough for it to boot and be reachable. Go on to the next section to
update the machine and put an actual configuration.
> FIXME: Figure out why the full configuration isn't on the machine at this
FIXME: Figure out why the full configuration isn't on the machine at this
point and fix it.
## Updating existing VM configurations

BIN
infra/architecture.pdf Normal file

Binary file not shown.


@ -1,13 +1,12 @@
{ lib, ... }:
{ lib, pkgs, ... }:
let
inherit (lib) mkDefault;
nixPath = "/run/current-system/nixpkgs";
in
{
_class = "nixos";
imports = [
./hardware.nix
./networking.nix
./users.nix
];
@ -17,15 +16,19 @@ in
system.stateVersion = "24.05"; # do not change
nixpkgs.hostPlatform = mkDefault "x86_64-linux";
# use flake's nixpkgs over channels
nix.nixPath = [ "nixpkgs=${nixPath}" ];
system.extraSystemBuilderCmds = ''
ln -sv ${pkgs.path} $out/nixpkgs
'';
systemd.tmpfiles.rules = [
"L+ ${nixPath} - - - - ${pkgs.path}"
];
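  ## Both mechanisms pin `<nixpkgs>` (resolved through the nixPath entry above)
  ## to the exact nixpkgs this system was built from: the builder command embeds
  ## a `nixpkgs` link in the system closure, and the tmpfiles rule maintains the
  ## same link under /run/current-system at runtime.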
## This is just nice to have, but it is also particularly important for the
## Forgejo CI runners because the Nix configuration in the actions is directly
## taken from here.
nix.extraOptions = ''
experimental-features = nix-command flakes
'';
boot.loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
}


@ -1,16 +1,20 @@
{ sources, ... }:
{
_class = "nixos";
{ modulesPath, ... }:
imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
];
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot = {
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
initrd = {
availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];


@ -1,64 +1,63 @@
{ config, lib, ... }:
let
inherit (lib) mkDefault mkIf mkMerge;
inherit (lib) mkDefault;
in
{
_class = "nixos";
config = {
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = mkMerge [
{
hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain;
networking = {
hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain;
## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true;
## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true;
## Disable the default firewall and use nftables instead, with a custom
## Procolix-made ruleset.
firewall.enable = false;
nftables = {
enable = true;
rulesetFile = ./nftables-ruleset.nft;
interfaces = {
eth0 = {
ipv4 = {
addresses = [
{
inherit (config.fediversityVm.ipv4) address prefixLength;
}
];
};
ipv6 = {
addresses = [
{
inherit (config.fediversityVm.ipv6) address prefixLength;
}
];
};
};
}
};
## IPv4
(mkIf config.fediversityVm.ipv4.enable {
interfaces.${config.fediversityVm.ipv4.interface}.ipv4.addresses = [
{ inherit (config.fediversityVm.ipv4) address prefixLength; }
];
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = config.fediversityVm.ipv4.interface;
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
];
})
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = "eth0";
};
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = "eth0";
};
## IPv6
(mkIf config.fediversityVm.ipv6.enable {
interfaces.${config.fediversityVm.ipv6.interface}.ipv6.addresses = [
{ inherit (config.fediversityVm.ipv6) address prefixLength; }
];
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = config.fediversityVm.ipv6.interface;
};
nameservers = [
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
})
];
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
firewall.enable = false;
nftables = {
enable = true;
rulesetFile = ./nftables-ruleset.nft;
};
};
};
}


@ -1,13 +1,5 @@
{
config,
...
}:
{
_class = "nixos";
users.users = {
root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = {
isNormalUser = true;
extraGroups = [ "wheel" ];


@ -6,8 +6,6 @@ let
in
{
# `config` not set and imported from multiple places: no fixed module class
options.fediversityVm = {
##########################################################################
@ -91,17 +89,6 @@ in
};
ipv4 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv4 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 4. It will be injected as a
@ -127,17 +114,6 @@ in
};
ipv6 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv6 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 6. It will be injected as a


@ -2,9 +2,6 @@
inputs,
lib,
config,
sources,
keys,
secrets,
...
}:
@ -13,10 +10,12 @@ let
inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix;
secretsPrefix = ../../secrets;
secrets = import (secretsPrefix + "/secrets.nix");
keys = import ../../keys;
in
{
_class = "nixops4Resource";
imports = [ ./options.nix ];
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
@ -26,15 +25,16 @@ in
hostPublicKey = config.fediversityVm.hostPublicKey;
};
inherit (inputs) nixpkgs;
nixpkgs = inputs.nixpkgs;
## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else
## should go into the `./nixos` subdirectory.
nixos.module = {
imports = [
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
inputs.agenix.nixosModules.default
inputs.disko.nixosModules.default
inputs.home-manager.nixosModules.home-manager
./options.nix
./nixos
];
@ -43,23 +43,21 @@ in
## configuration.
fediversityVm = config.fediversityVm;
## Read all the secrets, filter the ones that are supposed to be readable with
## public key, and create a mapping from `<name>.file` to the absolute path of
## the secret's file.
## Read all the secrets, filter the ones that are supposed to be readable
## with this host's public key, and add them correctly to the configuration
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
${removeSuffix ".age" name}.file = secrets.rootPath + "/${name}";
}
) secrets.mapping;
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
})
) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo.
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
# allow our panel vm access to the test machines
keys.panel
# allow continuous deployment access
keys.cd
];
};


@ -1,9 +1,7 @@
{
self,
inputs,
lib,
sources,
keys,
secrets,
...
}:
@ -23,23 +21,7 @@ let
makeResourceModule =
{ vmName, isTestVm }:
{
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch flake-parts and have our own data model for how the project is organised internally
_module.args = {
inherit
inputs
keys
secrets
;
};
nixos.module.imports = [
./common/proxmox-qemu-vm.nix
];
nixos.specialArgs = {
inherit sources;
};
_module.args = { inherit inputs; };
imports =
[
./common/resource.nix
@ -47,17 +29,17 @@ let
++ (
if isTestVm then
[
../machines/operator/${vmName}
./test-machines/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
# allow our panel vm access to the test machines
keys.panel
(import ../keys).panel
];
}
]
else
[
../machines/dev/${vmName}
./machines/${vmName}
]
);
fediversityVm.name = vmName;
@ -69,33 +51,17 @@ let
vmNames:
{ providers, ... }:
{
# XXX: this type merge is for adding `specialArgs` to resource modules
options.resources = mkOption {
type =
with lib.types;
lazyAttrsOf (submoduleWith {
class = "nixops4Resource";
modules = [ ];
# TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs`
# once we're sure it's sane. leaving it here for better control during refactoring.
specialArgs = {
inherit sources;
};
});
};
config = {
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];
@ -107,7 +73,7 @@ let
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../services/fediversity;
inherit (self.nixosModules) fediversity;
}
{
garageConfigurationResource = makeResourceModule {
@ -130,7 +96,7 @@ let
nixops4ResourceNixosMockOptions = {
## NOTE: We allow the use of a few options from
## `nixops4-nixos.modules.nixops4Resource.nixos` such that we can
## `inputs.nixops4-nixos.modules.nixops4Resource.nixos` such that we can
## reuse modules that make use of them.
##
## REVIEW: We can probably do much better and cleaner. On the other hand,
@ -155,10 +121,7 @@ let
## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration =
isTestVm: vmName:
let
inherit (sources) nixpkgs;
in
import "${nixpkgs}/nixos" {
inputs.nixpkgs.lib.nixosSystem {
modules = [
(makeResourceConfig { inherit vmName isTestVm; }).nixos.module
];
@ -182,12 +145,12 @@ let
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev;
testMachines = listSubdirectories ../machines/operator;
machines = listSubdirectories ./machines;
testMachines = listSubdirectories ./test-machines;
in
{
_class = "flake";
flake.lib.makeInstallerIso = import ./makeInstallerIso.nix;
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
@ -204,7 +167,7 @@ in
if env != "" then
env
else
builtins.trace "env var DEPLOYMENT not set, falling back to ../deployment/configuration.sample.json!" (readFile ../deployment/configuration.sample.json)
builtins.trace "env var DEPLOYMENT not set, falling back to ./test-machines/configuration.json!" (readFile ./test-machines/configuration.json)
)
);
};


@ -7,10 +7,9 @@ Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description
--------|---------|-------------
[`fedi200`](./dev/fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./dev/fedi201) | fediversity | FediPanel
[`vm02116`](./dev/vm02116) | procolix | Forgejo
[`vm02187`](./dev/vm02187) | procolix | Wiki
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
[`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./vm02187) | procolix | Wiki
This table excludes all machines with names starting with `test`.


@ -20,7 +20,7 @@ vmOptions=$(
cd ..
nix eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ../.)).vmOptions
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
" \
--log-format raw --quiet
)
@ -32,12 +32,11 @@ for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)
# shellcheck disable=SC2016
printf '[`%s`](./dev/%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
fi
done
cat <<\EOF
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
This table excludes all machines with names starting with `test`.
EOF


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 200;
proxmox = "fediversity";
@ -16,10 +14,4 @@
gateway = "2a00:51c0:13:1305::1";
};
};
nixos.module = {
imports = [
../../../infra/common/proxmox-qemu-vm.nix
];
};
}


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 201;
proxmox = "fediversity";
@ -19,7 +17,6 @@
nixos.module = {
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./fedipanel.nix
];
};


@ -1,17 +1,13 @@
{
config,
sources,
...
}:
let
name = "panel";
in
{
_class = "nixos";
imports = [
(import ../../../panel { }).module
"${sources.home-manager}/nixos"
];
security.acme = {
@ -41,6 +37,27 @@ in
enable = true;
production = true;
domain = "demo.fediversity.eu";
# FIXME: make it work without this duplication
settings =
let
cfg = config.services.${name};
in
{
STATIC_ROOT = "/var/lib/${name}/static";
DEBUG = false;
ALLOWED_HOSTS = [
cfg.domain
cfg.host
"localhost"
"[::1]"
];
CSRF_TRUSTED_ORIGINS = [ "https://${cfg.domain}" ];
COMPRESS_OFFLINE = true;
LIBSASS_OUTPUT_STYLE = "compressed";
};
environment = {
SSH_PRIVATE_KEY_FILE = config.age.secrets.panel-ssh-key.path;
};
secrets = {
SECRET_KEY = config.age.secrets.panel-secret-key.path;
};


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 2116;
proxmox = "procolix";
@ -14,7 +12,6 @@
{ lib, ... }:
{
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./forgejo.nix
];


@ -5,8 +5,6 @@ let
in
{
_class = "nixos";
services.forgejo = {
enable = true;


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 2187;
proxmox = "procolix";
@ -14,7 +12,6 @@
{ lib, ... }:
{
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./wiki.nix
];


@ -1,8 +1,6 @@
{ config, ... }:
{
_class = "nixos";
services.phpfpm.pools.mediawiki.phpOptions = ''
upload_max_filesize = 1024M;
post_max_size = 1024M;


@ -15,6 +15,7 @@ let
installer =
{
config,
pkgs,
lib,
...


@ -229,7 +229,7 @@ build_iso () {
nix build \
--impure --expr "
let flake = builtins.getFlake (builtins.toString ./.); in
import ./makeInstallerIso.nix {
flake.lib.makeInstallerIso {
nixosConfiguration = flake.nixosConfigurations.$vm_name;
nixpkgs = flake.inputs.nixpkgs;
$nix_host_keys


@ -1,5 +1,5 @@
{
"domain": "fediversity.net",
"domain": "abundos.eu",
"mastodon": { "enable": false },
"peertube": { "enable": false },
"pixelfed": { "enable": false },


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7001;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7002;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7003;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7004;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7005;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7006;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7011;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7012;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7013;
proxmox = "fediversity";


@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7014;
proxmox = "fediversity";


@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlsYTtMx3hFO8B5B8iHaXL2JKj9izHeC+/AMhIWXBPs cd-age


@ -1 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICJj4L5Yt9ABdQeEkJI6VuJEyUSVbCHMxYLdvVcB/pXh niols@wallace/fediversity
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEElREJN0AC7lbp+5X204pQ5r030IbgCllsIxyU3iiKY niols@wallace


@ -35,5 +35,4 @@ in
contributors = collectKeys ./contributors;
systems = collectKeys ./systems;
panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub);
cd = removeTrailingWhitespace (readFile ./cd-ssh-key.pub);
}


@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.keys = import ./.;
}


@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFXQW5fxJoNY9wtTMsNExgbAbvyljIRGBLjY+USh/0A

10
launch/.envrc Normal file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# the shebang is ignored, but nice for editors
# shellcheck shell=bash
if type -P lorri &>/dev/null; then
eval "$(lorri direnv --flake .)"
else
echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
use flake
fi

5
launch/.gitignore vendored Normal file

@ -0,0 +1,5 @@
.auto.tfvars.json
.npins.json
.terraform/
.terraform.tfstate.lock.info
terraform.tfstate*

16
launch/.terraform.lock.hcl generated Normal file

@ -0,0 +1,16 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/hashicorp/external" {
version = "2.3.4"
hashes = [
"h1:HfVaWMC7Tz+tRfoWZtGCX2MATcgX3HsexoirWdi/voo=",
]
}
provider "registry.opentofu.org/hashicorp/null" {
version = "3.2.3"
hashes = [
"h1:qTlGDGC3RmXIPLgwsIh4LHG/DrAR6T6L+Wn6egnQnwE=",
]
}

24
launch/README.md Normal file

@ -0,0 +1,24 @@
# service deployment
## usage
<!-- TODO: port to just -->
### updating npins
```sh
$ cd launch/
$ echo "$(nix eval --json -f ../npins)" > .npins.json
```
### local development
```sh
$ nix-shell
$ eval "$(ssh-agent -s)"
# set your ssh key, e.g.:
$ ssh_key="$(readlink -f ~/.ssh/id_ed25519)"
$ echo "{\"ssh_private_key_file\": \"${ssh_key}\", \"deploy_environment\": {\"SSH_AUTH_SOCK\": \"${SSH_AUTH_SOCK}\"}}" > .auto.tfvars.json
$ rm -rf .terraform/
$ tofu init
```
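
With `.auto.tfvars.json` in place, the usual OpenTofu cycle should work from here (a sketch, assuming the standard workflow):

```sh
$ tofu plan
$ tofu apply
```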

31
launch/default.nix Normal file

@ -0,0 +1,31 @@
{
system ? builtins.currentSystem,
sources ? import ../npins,
inputs ? import sources.flake-inputs {
root = ../.;
},
# match the same version of opentofu that is deployed by the root flake
pkgs ? import inputs.nixpkgs {
inherit system;
},
}:
let
inherit (pkgs) lib;
in
{
shell = pkgs.mkShellNoCC {
packages = [
pkgs.npins
pkgs.jaq # JSON processing in the tf deploy script (main.tf)
(import ./tf.nix { inherit lib pkgs; })
];
};
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}

34
launch/garage.nix Normal file

@ -0,0 +1,34 @@
{ pkgs, ... }:
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
in
{
fediversity = {
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
}

179
launch/main.tf Normal file

@ -0,0 +1,179 @@
variable "domain" {
type = string
default = "fediversity.net"
}
variable "mastodon" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "pixelfed" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "peertube" {
type = object({
enable = bool
})
default = {
enable = false
}
}
variable "initialUser" {
type = object({
displayName = string
username = string
email = string
# TODO: mark (nested) credentials as sensitive
# https://discuss.hashicorp.com/t/is-it-possible-to-mark-an-attribute-of-an-object-as-sensitive/24649/2
password = string
})
default = {
displayName = "Testy McTestface"
username = "test"
email = "test@test.com"
password = "testtest"
}
}
variable "ssh_private_key_file" {
type = string
description = "Path to private key used to connect to the target_host"
default = ""
}
variable "deploy_environment" {
type = map(string)
description = "Extra environment variables to be set during deployment."
default = {}
}
locals {
system = "x86_64-linux"
pins = jsondecode(file("${path.module}/.npins.json"))
peripheral_configs = {
garage = "test01"
}
application_configs = {
mastodon = {
cfg = var.mastodon
hostname = "test06"
}
pixelfed = {
cfg = var.pixelfed
hostname = "test04"
}
peertube = {
cfg = var.peertube
hostname = "test05"
}
}
peripherals = { for name, inst in local.peripheral_configs : name => {
hostname = inst
cfg = {
enable = anytrue([for _, app in local.application_configs: app.cfg.enable])
}
}
}
}
# FIXME settle for pwd when in /nix/store?
# FIXME calculate separately to reduce false positives
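# NOTE: the external data source contract requires the program to print a
# single JSON object whose values are all strings; it is consumed below as
# data.external.hash.result.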
data "external" "hash" {
program = ["sh", "-c", "echo '{\"hash\":\"$(nix-hash ..)\"}'"]
}
# merged instantiate/deploy to prevent 24+s instantiates when nothing changed.
# terraform-nixos separates these to only deploy if instantiate changed.
# FIXME find a better solution for this. current considerations were:
# - generic resources cannot have outputs, while we want info from the instantiation (unless built on host?).
# - `data` always runs, which is slow for deploy/instantiation.
resource "terraform_data" "nixos" {
for_each = {for name, inst in merge(
local.peripherals,
local.application_configs,
) : name => inst if inst.cfg.enable}
triggers_replace = [
data.external.hash.result,
var.deploy_environment,
var.domain,
var.initialUser,
local.system,
each.key,
each.value,
]
provisioner "local-exec" {
working_dir = path.root
environment = merge(var.deploy_environment, {
NIX_PATH = join(":", [for name, path in local.pins : "${name}=${path}"]),
})
# TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
# TODO: build on target?
command = <<-EOF
set -euo pipefail
# INSTANTIATE
command=(
nix-instantiate
--expr
'let
os = import <nixpkgs/nixos> {
system = "${local.system}";
configuration = {
terraform = builtins.fromJSON "${replace(jsonencode({
domain = var.domain
hostname = each.value.hostname
initialUser = var.initialUser
}), "\"", "\\\"")}";
imports = [
${path.root}/options.nix
${path.root}/shared.nix
${path.root}/${each.key}.nix
# FIXME: get VM details from TF
${path.root}/../infra/test-machines/${each.value.hostname}
];
};
};
in {
substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
drv_path = os.config.system.build.toplevel.drvPath;
out_path = os.config.system.build.toplevel;
}'
)
"$${command[@]}" -A out_path
json="$("$${command[@]}" --eval --strict --json)"
# DEPLOY
declare substituters trusted_public_keys drv_path
eval "export $(echo $json | jaq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
host="root@${each.value.hostname}.abundos.eu" # FIXME: #24
buildArgs=(
--option extra-binary-caches https://cache.nixos.org/
--option substituters $substituters
--option trusted-public-keys $trusted_public_keys
)
sshOpts=(
-o StrictHostKeyChecking=no
-o BatchMode=yes
-o "IdentityFile='${var.ssh_private_key_file}'"
)
outPath=$(nix-store --realize "$drv_path" "$${buildArgs[@]}")
NIX_SSHOPTS="$${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
ssh "$${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; $outPath/bin/switch-to-configuration switch"
EOF
}
}
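
A hypothetical invocation enabling a single service from the command line (variable names as declared above; object-typed variables take HCL expression syntax):

```sh
$ tofu apply -var 'domain=fediversity.net' -var 'mastodon={enable=true}'
```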

17
launch/mastodon.nix Normal file

@ -0,0 +1,17 @@
{ pkgs, ... }:
let
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
in
{
fediversity = {
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
}

53
launch/options.nix Normal file

@ -0,0 +1,53 @@
{
lib,
...
}:
let
inherit (lib) types mkOption;
inherit (types) str enum submodule;
in
{
options.terraform = {
domain = mkOption {
type = enum [
"fediversity.net"
];
description = ''
Apex domain under which the services will be deployed.
'';
default = "fediversity.net";
};
hostname = mkOption {
type = str;
description = ''
Internal name of the host, e.g. test01
'';
};
initialUser = mkOption {
description = ''
Some services require an initial user to access them.
This option sets the credentials for such an initial user.
'';
type = submodule {
options = {
displayName = mkOption {
type = str;
description = "Display name of the user";
};
username = mkOption {
type = str;
description = "Username for login";
};
email = mkOption {
type = str;
description = "User's email address";
};
password = mkOption {
type = str;
description = "Password for login";
};
};
};
};
};
}
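
These options are populated from Terraform through the `builtins.fromJSON` call in `main.tf` above. A glue module might translate them into the `fediversity` options seen earlier in this diff, roughly as follows (a sketch; the actual `shared.nix` is not shown in this view):

```nix
{ config, pkgs, ... }:
{
  fediversity = {
    inherit (config.terraform) domain;
    temp.initialUser = {
      inherit (config.terraform.initialUser) username email displayName;
      ## plaintext handling mirrors the deployment code above; see its FIXMEs
      passwordFile = pkgs.writeText "password" config.terraform.initialUser.password;
    };
  };
}
```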

Some files were not shown because too many files have changed in this diff.