Compare commits


3 commits

SHA1 | Message | Date
e916c606d1 | move files in line with tf-infra | 2025-06-17 08:32:59 +02:00
90cda83039 | account for 285 | 2025-05-09 15:16:08 +02:00
1019ac15b0 | button works deployed | 2025-05-05 20:22:54 +02:00
144 changed files with 889 additions and 2712 deletions

View file

@@ -1,24 +0,0 @@
name: deploy-infra
on:
workflow_dispatch: # allows manual triggering
push:
branches:
- main
jobs:
deploy:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up SSH key for age secrets and SSH
run: |
env
mkdir -p ~/.ssh
echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
- name: Deploy
run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'

View file

@@ -15,29 +15,17 @@ jobs:
- uses: actions/checkout@v4
- run: nix-build -A tests
check-data-model:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
check-mastodon:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-mastodon-service -L
check-peertube:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-peertube-service -L
- run: cd services && nix-build -A tests.peertube
check-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build -A tests.panel
- run: cd panel && nix-build -A tests
check-deployment-basic:
runs-on: native
@@ -45,44 +33,6 @@ jobs:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-basic -L
check-deployment-cli:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-cli -L
check-deployment-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -L
## NOTE: NixOps4 does not provide a good “dry run” mode, so we instead check
## proxies for resources, namely whether their `.#vmOptions.<machine>` and
## `.#nixosConfigurations.<machine>` outputs evaluate and build correctly, and
## whether we can dry run `infra/proxmox-*.sh` on them. This will not catch
## everything, and in particular not issues in how NixOps4 wires up the
## resources, but that is still something.
check-resources:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: |
set -euC
echo ==================== [ VM Options ] ====================
machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).vmOptions)')
for machine in $machines; do
echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
nix build .#checks.x86_64-linux.vmOptions-$machine
done
echo
echo ==================== [ NixOS Configurations ] ====================
machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).nixosConfigurations)')
for machine in $machines; do
echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
nix build .#checks.x86_64-linux.nixosConfigurations-$machine
done
check-launch:
runs-on: native
steps:

View file

@@ -1,24 +0,0 @@
name: update-dependencies
on:
workflow_dispatch: # allows manual triggering
# FIXME: re-enable when manual run works
# schedule:
# - cron: '0 0 1 * *' # monthly
jobs:
lockfile:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update pins
run: nix-shell --run "npins --verbose update"
- name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
with:
remote-instance-api-version: v1
token: "${{ secrets.DEPLOY_KEY }}"
branch: npins-update
commit-message: "npins: update sources"
title: "npins: update sources"

View file

@@ -14,12 +14,6 @@ There already exist solutions for self-hosting, but they're not suitable for wha
The ones we're aware of require substantial technical knowledge and time commitment by operators, especially for scaling to thousands of users.
Not everyone has the expertise and time to run their own server.
## Interactions
To reach these goals, we aim to implement the following interactions between [actors](#actors) (depicted with rounded corners) and system components (see the [glossary](#glossary), depicted with rectangles).
![](https://git.fediversity.eu/Fediversity/meta/raw/branch/main/architecture-docs/interactions.svg)
## Actors
- Fediversity project team
@@ -63,11 +57,11 @@ To reach these goals, we aim to implement the following interactions between [ac
- [Fediverse](https://en.wikipedia.org/wiki/Fediverse)
A collection of social networking applications that can communicate with each other using a common protocol.
A collection of social networking services that can communicate with each other using a common protocol.
- Application
- Service
User-facing software (e.g. from Fediverse) run by the hosting provider for an operator.
A Fediverse application run by the hosting provider for an operator.
- Configuration
@@ -79,11 +73,11 @@ To reach these goals, we aim to implement the following interactions between [ac
Make a resource, such as a virtual machine, available for use.
> Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for applications run by operators.
> Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for services run by operators.
- Deploy
Put software, such as applications, onto computers.
Put software, such as services, onto computers.
The software includes technical configuration that links software components.
Most user-facing configuration remains untouched by the deployment process.
@@ -91,7 +85,7 @@ To reach these goals, we aim to implement the following interactions between [ac
- Migrate
Move service configurations and deployment state, including user data, from one hosting provider to another.
Move service configurations and user data to a different hosting provider.
- [NixOps4](https://github.com/nixops4/nixops4)
@@ -109,18 +103,6 @@ To reach these goals, we aim to implement the following interactions between [ac
> Example: We need a resource provider for obtaining deployment secrets from a database.
- Runtime backend
A type of digital environment one can run operating systems such as NixOS on, e.g. bare-metal, a hypervisor, or a container runtime.
- Runtime environment
What a deployment runs on; an interface against which the deployment works. See runtime backend.
- Runtime config
Configuration logic specific to a runtime backend, e.g. how to deploy, how to access object storage.
## Development
All the code made for this project is freely licenced under [EUPL](https://en.m.wikipedia.org/wiki/European_Union_Public_Licence).
@@ -154,3 +136,6 @@ details as to what they are for. As an overview:
- [`services/`](./services) contains our effort to make Fediverse applications
work seamlessly together in our specific setting.
- [`website/`](./website) contains the framework and the content of [the
Fediversity website](https://fediversity.eu/)

View file

@@ -10,9 +10,6 @@ let
gitignore
;
inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4;
panel = import ./panel { inherit sources system; };
pre-commit-check =
(import "${git-hooks}/nix" {
inherit nixpkgs system;
@@ -27,7 +24,6 @@ let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [
"npins"
"**/.terraform"
];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
@@ -45,35 +41,10 @@ in
shell = pkgs.mkShellNoCC {
inherit (pre-commit-check) shellHook;
buildInputs = pre-commit-check.enabledPackages;
packages =
let
test-loop = pkgs.writeShellApplication {
name = "test-loop";
runtimeInputs = [
pkgs.watchexec
pkgs.nix-unit
];
text = ''
watchexec -w ${builtins.toString ./.} -- nix-unit ${builtins.toString ./deployment/data-model-test.nix} "$@"
'';
};
in
[
pkgs.npins
pkgs.nil
(pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
pkgs.nix-unit
test-loop
nixops4.packages.${system}.default
];
};
tests = {
inherit pre-commit-check;
panel = panel.tests;
};
# re-export inputs so they can be overridden granularly

View file

@@ -1,123 +1,6 @@
# Deployment
This directory contains work to generate a full Fediversity deployment from a minimal configuration.
This is different from [`../services/`](../services) that focuses on one machine, providing a polished and unified interface to different Fediverse services.
## Data model
The core piece of the project is the [Fediversity data model](./data-model.nix), which describes all entities and their interactions.
What can be done with it is exemplified in the [evaluation tests](./data-model-test.nix).
Run `test-loop` in the development environment when hacking on the data model or adding tests.
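For instance, from inside the development shell (a sketch, assuming the repository's default shell):

``` console
$ nix-shell --run test-loop
```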
## Checks
There are three levels of deployment checks: `basic`, `cli`, `panel`.
They can be found in subdirectories of [`check/`](./check).
They can be run as part of `nix flake check` or individually as:
``` console
$ nix build .#checks.<system>.deployment-<name> -vL
```
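For example, on an x86_64-linux machine, the basic check builds as:

``` console
$ nix build .#checks.x86_64-linux.deployment-basic -vL
```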
Since `nixops4 apply` operates on a flake, the tests take this repository's flake as a template.
This is also why there are some dummy files that will be overwritten inside the test.
### Basic deployment check
The basic deployment check is here as a building block and sanity check.
It does not actually use any of the code in this directory but checks that our test strategy is sound and that basic NixOps4 functionality is in place.
It is a NixOS test featuring one deployer machine and two target machines.
The deployment simply adds `pkgs.hello` to one and `pkgs.cowsay` to the other.
It is heavily inspired by [a similar test in nixops4-nixos].
[a similar test in nixops4-nixos]: https://github.com/nixops4/nixops4-nixos/blob/main/test/default/nixosTest.nix
This test involves three nodes:
- `deployer` is the node that will perform the deployment using `nixops4 apply`.
Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.
- “target machines” are two eponymous nodes on which the packages `hello` and `cowsay` will be deployed.
They start with a minimal configuration.
``` mermaid
flowchart LR
deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]
subgraph target_machines["target machines"]
direction TB
hello
cowsay
end
deployer -->|deploys| target_machines
```
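Inside the test, the deployer performs the deployment with an invocation along these lines (cf. the test script):

``` console
$ nixops4 apply check-deployment-basic --show-trace --no-interactive
```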
### Service deployment check using `nixops4 apply`
This check omits the panel by running a direct invocation of NixOps4.
It deploys some services and checks that they are indeed on the target machines, then cleans them up and checks whether that works, too.
It builds upon the basic deployment check.
This test involves seven nodes:
- `deployer` is the node that will perform the deployment using `nixops4 apply`.
Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.
- “target machines” are four nodes — `garage`, `mastodon`, `peertube`, and `pixelfed` — on which the services will be deployed.
They start with a minimal configuration.
- `acme` is a node that runs [Pebble], a miniature ACME server to deliver the certificates that the services expect.
- [WIP] `client` is a node that runs a browser controlled by some Selenium scripts in order to check that the services are indeed running and are accessible.
[Pebble]: https://github.com/letsencrypt/pebble
``` mermaid
flowchart LR
classDef invisible fill:none,stroke:none
subgraph left [" "]
direction TB
deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]
client["client<br><font size='1'>Selenium scripts</font>"]
end
subgraph middle [" "]
subgraph target_machines["target machines"]
direction TB
garage
mastodon
peertube
pixelfed
end
end
subgraph right [" "]
direction TB
acme["acme<br><font size='1'>runs Pebble</font>"]
end
left ~~~ middle ~~~ right
class left,middle,right invisible
deployer -->|deploys| target_machines
client -->|tests| mastodon
client -->|tests| peertube
client -->|tests| pixelfed
target_machines -->|get certs| acme
```
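The test script exercises one deployment per scenario, e.g.:

``` console
$ nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive
$ nixops4 apply check-deployment-cli-mastodon-pixelfed --show-trace --no-interactive
$ nixops4 apply check-deployment-cli-peertube --show-trace --no-interactive
```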
### Service deployment check from the FediPanel
This is a full deployment check that runs the [FediPanel](../panel) on the deployer machine, deploys some services through it, checks that they are indeed on the target machines, then cleans them up and checks that this works, too.
It builds upon the basic and CLI deployment checks, the only difference being that `deployer` runs NixOps4 only indirectly via the panel, and the `client` node is the one that triggers the deployment via a browser, the way a human would.
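Under the hood, the panel check's deployment reads the operator's choices as JSON from the `DEPLOYMENT` environment variable; a hypothetical manual invocation (the exact JSON shape follows `configuration.sample.json`) would look like:

``` console
$ DEPLOYMENT='{"mastodon":{"enable":true},"pixelfed":{"enable":true}}' nixops4 apply check-deployment-panel
```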
This repository contains work to generate a full Fediversity deployment from a
minimal configuration. This is different from [`../services/`](../services) that
focuses on one machine, providing a polished and unified interface to different
Fediverse services.

View file

@@ -0,0 +1,9 @@
# Basic deployment test
Basic deployment test with one deployer machine, one target machine, and a
simple target application, namely cowsay. The goal is to check that basic
functionality is in place.
It is heavily inspired by a similar test in nixops4-nixos:
https://github.com/nixops4/nixops4-nixos/blob/main/test/default/nixosTest.nix
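The deployment module for this check boils down to a single NixOps4 resource installing cowsay on the target; a simplified sketch of `deployment.nix` (omitting the minimal-target and network imports):

``` nix
{ inputs, providers, ... }:
{
  providers.local = inputs.nixops4.modules.nixops4Provider.local;
  resources.target = {
    type = providers.local.exec;
    imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
    ssh = {
      host = "target";
      hostPublicKey = builtins.readFile ./target_host_key.pub;
    };
    nixpkgs = inputs.nixpkgs;
    nixos.module =
      { pkgs, ... }:
      {
        nixpkgs.hostPlatform = "x86_64-linux";
        environment.systemPackages = [ pkgs.cowsay ];
      };
  };
}
```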

View file

@@ -1,8 +0,0 @@
{
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
}

View file

@@ -1,14 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}

View file

@@ -0,0 +1 @@
## This file is just a placeholder. It is overwritten by the test.

View file

@@ -1,36 +1,32 @@
{
inputs,
sources,
lib,
providers,
...
}:
let
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
in
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources.target = {
type = providers.local.exec;
imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
ssh = {
host = "target";
hostPublicKey = builtins.readFile ./target_host_key.pub;
};
resources = lib.genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
nixpkgs = inputs.nixpkgs;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
imports = [
./minimalTarget.nix
(lib.modules.importJSON ./target-network.json)
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.systemPackages = [ pkgs.cowsay ];
};
};
});
}

View file

@@ -0,0 +1,21 @@
{ inputs, ... }:
{
nixops4Deployments.check-deployment-basic =
{ ... }:
{
imports = [
./deployment.nix
];
_module.args.inputs = inputs;
};
perSystem =
{ inputs', pkgs, ... }:
{
checks.deployment-basic = pkgs.callPackage ./nixosTest.nix {
nixops4-flake-in-a-bottle = inputs'.nixops4.packages.flake-in-a-bottle;
inherit inputs;
};
};
}

View file

@@ -1,22 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-basic = {
imports = [ ./deployment/check/basic/deployment.nix ];
_module.args = { inherit inputs sources; };
};
}
);
}

View file

@@ -0,0 +1,35 @@
{
lib,
modulesPath,
...
}:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
];
## Test framework disables switching by default. That might be OK by itself,
## but we also use this config for getting the dependencies in
## `deployer.system.extraDependencies`.
system.switch.enable = true;
nix = {
## Not used; save a large copy operation
channel.enable = false;
registry = lib.mkForce { };
};
services.openssh = {
enable = true;
settings.PermitRootLogin = "yes";
};
networking.firewall.allowedTCPPorts = [ 22 ];
users.users.root.openssh.authorizedKeys.keyFiles = [ ./deployer.pub ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
}

View file

@@ -1,48 +1,161 @@
{ inputs, lib, ... }:
{
_class = "nixosTest";
testers,
inputs,
runCommandNoCC,
nixops4-flake-in-a-bottle,
...
}:
testers.runNixOSTest (
{
lib,
config,
hostPkgs,
...
}:
let
vmSystem = config.node.pkgs.hostPlatform.system;
pathToRoot = ../../..;
pathFromRoot = "deployment/check/basic";
deploymentName = "check-deployment-basic";
## TODO: sanity check the existence of (pathToRoot + "/flake.nix")
## TODO: sanity check that (pathToRoot + "/${pathFromRoot}" == ./.)
## The whole repository, with the flake at its root.
src = lib.fileset.toSource {
fileset = pathToRoot;
root = pathToRoot;
};
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
targetNetworkJSON = hostPkgs.writeText "target-network.json" (
builtins.toJSON config.nodes.target.system.build.networkConfig
);
in
{
name = "deployment-basic";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
imports = [
inputs.nixops4-nixos.modules.nixosTest.static
];
nodes.deployer =
{ pkgs, ... }:
nodes = {
deployer =
{ pkgs, nodes, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${pkgs.system}.default
inputs.nixops4.packages.${vmSystem}.default
];
# FIXME: sad times
system.extraDependencies = with pkgs; [
jq
jq.inputDerivation
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
cowsay
];
};
virtualisation = {
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4096;
diskSize = 10 * 1024;
cores = 2;
};
extraTestScript = ''
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};
system.extraDependencies =
[
"${inputs.flake-parts}"
"${inputs.flake-parts.inputs.nixpkgs-lib}"
"${inputs.nixops4}"
"${inputs.nixops4-nixos}"
"${inputs.nixpkgs}"
pkgs.stdenv
pkgs.stdenvNoCC
pkgs.cowsay
pkgs.cowsay.inputDerivation # NOTE: Crucial!!!
## Some derivations will be different compared to target's initial
## state, so we'll need to be able to build something similar.
## Generally the derivation inputs aren't that different, so we
## use the initial state of the target as a base.
nodes.target.system.build.toplevel.inputDerivation
nodes.target.system.build.etc.inputDerivation
nodes.target.system.path.inputDerivation
nodes.target.system.build.bootStage1.inputDerivation
nodes.target.system.build.bootStage2.inputDerivation
]
++ lib.concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) nodes.target.environment.etc
);
};
target.imports = [ ./minimalTarget.nix ];
};
testScript = ''
start_all()
target.wait_for_unit("multi-user.target")
deployer.wait_for_unit("multi-user.target")
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${src} work")
with subtest("Configure the network"):
deployer.copy_from_host("${targetNetworkJSON}", "/root/target-network.json")
deployer.succeed("mv /root/target-network.json work/${pathFromRoot}/target-network.json")
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
deployer.succeed(f"echo '{deployer_key}' > work/${pathFromRoot}/deployer.pub")
target.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
with subtest("Configure the target host key"):
target_host_key = target.succeed("ssh-keyscan target | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{target_host_key}' > work/${pathFromRoot}/target_host_key.pub")
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing to grab things directly from the host's store.
with subtest("Override the lock"):
deployer.succeed("""
cd work
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input flake-parts ${inputs.flake-parts} \
--override-input nixops4 ${nixops4-flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${nixops4-flake-in-a-bottle} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
\
--override-input nixpkgs ${inputs.nixpkgs} \
--override-input git-hooks ${inputs.git-hooks} \
;
""")
with subtest("Check the status before deployment"):
hello.fail("hello 1>&2")
cowsay.fail("cowsay 1>&2")
target.fail("cowsay hi 1>&2")
with subtest("Run the deployment"):
deployer.succeed("nixops4 apply check-deployment-basic --show-trace --no-interactive 1>&2")
deployer.succeed("cd work && nixops4 apply ${deploymentName} --show-trace --no-interactive")
with subtest("Check the deployment"):
hello.succeed("hello 1>&2")
cowsay.succeed("cowsay hi 1>&2")
target.succeed("cowsay hi 1>&2")
'';
}
)

View file

@@ -0,0 +1 @@
{"comment": "This file is just a placeholder. It is overwritten by the test."}

View file

@@ -0,0 +1 @@
## This file is just a placeholder. It is overwritten by the test.

View file

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -1,59 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
}

View file

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments = import ./deployment/check/cli/deployments.nix {
inherit inputs sources lib;
};
}
);
}

View file

@@ -1,137 +0,0 @@
{
inputs,
hostPkgs,
lib,
...
}:
let
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "";
in
{
_class = "nixosTest";
name = "deployment-cli";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployments.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in panel test)
../../default.nix
../../options.nix
../../configuration.sample.json
../../../services/fediversity
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${pkgs.system}.default
];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not type
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The tests for the presence of the services are very simple: we only
## check that there is a systemd service of the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-mastodon-pixelfed --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
deployer.succeed("nixops4 apply check-deployment-cli-peertube --show-trace --no-interactive 1>&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

View file

@@ -1,106 +0,0 @@
{
inputs,
lib,
pkgs,
config,
sources,
...
}:
let
inherit (lib)
mkOption
mkForce
concatLists
types
;
in
{
_class = "nixos";
imports = [ ./sharedOptions.nix ];
options.system.extraDependenciesFromModule = mkOption {
type = types.deferredModule;
description = ''
Grab the derivations needed to build the given module and dump them in
system.extraDependencies. You want to put in this module a superset of
all the things that you will need on your target machines.
NOTE: This will work as long as the union of all these configurations does
not have conflicts that would prevent evaluation.
'';
default = { };
};
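## For example, a deployer node in one of the checks sets something like the
## following (illustrative values, cf. the basic check):
##
## system.extraDependenciesFromModule =
## { pkgs, ... }:
## {
## environment.systemPackages = with pkgs; [ hello cowsay ];
## };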
config = {
virtualisation = {
## NOTE: The deployer machine needs more RAM and disk than the
## default. These values have been trimmed down to the gigabyte.
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4 * 1024;
diskSize = 4 * 1024;
cores = 2;
};
nix.settings = {
substituters = mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
extra-experimental-features = "flakes";
};
system.extraDependencies =
[
inputs.nixops4
inputs.nixops4-nixos
inputs.nixpkgs
sources.flake-parts
sources.flake-inputs
sources.git-hooks
pkgs.stdenv
pkgs.stdenvNoCC
]
++ (
let
## We build a whole NixOS system that contains the module
## `system.extraDependenciesFromModule`, only to grab its
## configuration and the store paths needed to build it and
## dump them in `system.extraDependencies`.
machine =
(pkgs.nixos [
./targetNode.nix
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.acmeNodeIP;
}
]).config;
in
[
machine.system.build.toplevel.inputDerivation
machine.system.build.etc.inputDerivation
machine.system.build.etcBasedir.inputDerivation
machine.system.build.etcMetadataImage.inputDerivation
machine.system.build.extraUtils.inputDerivation
machine.system.path.inputDerivation
machine.system.build.setEnvironment.inputDerivation
machine.system.build.vm.inputDerivation
machine.system.build.bootStage1.inputDerivation
machine.system.build.bootStage2.inputDerivation
]
++ concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) machine.environment.etc
)
);
};
}

View file

@@ -1,201 +0,0 @@
{
inputs,
lib,
config,
hostPkgs,
sources,
...
}:
let
inherit (builtins)
concatStringsSep
toJSON
;
inherit (lib)
types
fileset
mkOption
genAttrs
attrNames
optionalString
;
inherit (hostPkgs)
runCommandNoCC
writeText
system
;
forConcat = xs: f: concatStringsSep "\n" (map f xs);
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
in
{
_class = "nixosTest";
imports = [
./sharedOptions.nix
];
options = {
## FIXME: I wish I could just use `testScript` but with something like
## `mkOrder` to put this module's string before something else.
extraTestScript = mkOption { };
sourceFileset = mkOption {
## FIXME: grab `lib.types.fileset` from NixOS, once upstreaming PR
## https://github.com/NixOS/nixpkgs/pull/428293 lands.
type = types.mkOptionType {
name = "fileset";
description = "fileset";
descriptionClass = "noun";
check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
merge = (_: defs: fileset.unions (map (x: x.value) defs));
};
description = ''
A fileset that will be copied to the deployer node in the current
working directory. This should contain all the files that are
necessary to run that particular test, such as the NixOS
modules necessary to evaluate a deployment.
'';
};
};
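## For example, the CLI check sets (abridged):
##
## sourceFileset = lib.fileset.unions [
## ./constants.nix
## ./deployments.nix
## ../../default.nix
## ../../options.nix
## ];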
config = {
sourceFileset = fileset.unions [
# NOTE: not the flake itself; it will be overridden.
../../../mkFlake.nix
../../../flake.lock
../../../npins
./sharedOptions.nix
./targetNode.nix
./targetResource.nix
(config.pathToCwd + "/flake-under-test.nix")
];
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
nodes =
{
deployer = {
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
};
}
//
(
if config.enableAcme then
{
acme = {
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
## break things once we play with DNS?
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
## We aren't testing ACME - we just want certificates.
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
};
}
else
{ }
)
//
genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
testScript = ''
${forConcat (attrNames config.nodes) (n: ''
${n}.start()
'')}
${forConcat (attrNames config.nodes) (n: ''
${n}.wait_for_unit("multi-user.target")
'')}
## A subset of the repository that is necessary for this test. It will be
## copied inside the test. The smaller this set, the faster our CI, because we
## won't need to re-run when things change outside of it.
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${
fileset.toSource {
root = ../../..;
fileset = config.sourceFileset;
}
}/* .")
with subtest("Configure the network"):
${forConcat config.targetMachines (
tm:
let
targetNetworkJSON = writeText "target-network.json" (
toJSON config.nodes.${tm}.system.build.networkConfig
);
in
''
deployer.copy_from_host("${targetNetworkJSON}", "${config.pathFromRoot}/${tm}-network.json")
''
)}
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
${forConcat config.targetMachines (tm: ''
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
'')}
with subtest("Configure the target host key"):
${forConcat config.targetMachines (tm: ''
host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
'')}
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing to grab things directly from the host's store.
##
## NOTE: We use the repository as-is (cf `src` above), overriding only
## `flake.nix` by our `flake-under-test.nix`. We also override the flake
## lock file to use locally available inputs, as we cannot download them.
##
with subtest("Override the flake and its lock"):
deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
deployer.succeed("""
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${
inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
;
""")
${optionalString config.enableAcme ''
with subtest("Set up handmade DNS"):
deployer.succeed("echo '${config.nodes.acme.networking.primaryIPAddress}' > ${config.pathFromRoot}/acme_server_ip")
''}
${config.extraTestScript}
'';
};
}

View file

@@ -1,68 +0,0 @@
/**
This file contains options shared by various components of the integration test, i.e. deployment resources, test nodes, target configurations, etc.
All these components are declared as modules, but are part of different evaluations, which is why the options in this file can't be shared "directly".
Instead, each component imports this module and the same values are set for each of them from a common call site.
Not all components will use all the options, which allows not setting all the values.
*/
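## For example, a concrete check sets the same values for several components
## from one call site (cf. `default.nix` of the basic check):
##
## runNixOSTest {
## imports = [
## ../common/nixosTest.nix
## ./nixosTest.nix
## ];
## _module.args = { inherit inputs sources; };
## inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
## }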
{ config, lib, ... }:
let
inherit (lib) mkOption types;
in
# `config` is not set, and this file is imported from multiple places: no fixed module class
{
options = {
targetMachines = mkOption {
type = with types; listOf str;
description = ''
Names of the nodes in the NixOS test that are target machines. This is
used by the infrastructure to extract their network configuration, among
other things, and re-import it in the deployment.
'';
};
pathToRoot = mkOption {
type = types.path;
description = ''
Path from the location of the working directory to the root of the
repository.
'';
};
pathFromRoot = mkOption {
type = types.path;
description = ''
Path from the root of the repository to the working directory.
'';
apply = x: lib.path.removePrefix config.pathToRoot x;
};
pathToCwd = mkOption {
type = types.path;
description = ''
Path to the current working directory. This is a shortcut for
pathToRoot/pathFromRoot.
'';
default = config.pathToRoot + "/${config.pathFromRoot}";
};
enableAcme = mkOption {
type = types.bool;
description = ''
Whether to enable ACME in the NixOS test. This will add an ACME server
to the node and connect all the target machines to it.
'';
default = false;
};
acmeNodeIP = mkOption {
type = types.str;
description = ''
The IP of the ACME node in the NixOS test. This option will be set
during the test to the correct value.
'';
};
};
}

View file

@@ -1,67 +0,0 @@
{
inputs,
config,
lib,
modulesPath,
...
}:
let
testCerts = import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix";
inherit (lib) mkIf mkMerge;
in
{
_class = "nixos";
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
config = mkMerge [
{
## Test framework disables switching by default. That might be OK by itself,
## but we also use this config for getting the dependencies in
## `deployer.system.extraDependencies`.
system.switch.enable = true;
nix = {
## Not used; save a large copy operation
channel.enable = false;
registry = lib.mkForce { };
};
services.openssh = {
enable = true;
settings.PermitRootLogin = "yes";
};
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
}
(mkIf config.enableAcme {
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
## NOTE: This certificate is the one used by the Pebble HTTPS server.
## This is NOT the root CA of the Pebble server. We do add it here so
## that Pebble clients can talk to its API, but this will not allow
## those machines to verify generated certificates.
testCerts.ca.cert
];
## FIXME: it is a bit sad that all this logistics is necessary. look into
## better DNS stuff
networking.extraHosts = "${config.acmeNodeIP} acme.test";
})
];
}

View file

@@ -1,51 +0,0 @@
{
inputs,
lib,
config,
sources,
...
}:
let
inherit (builtins) readFile;
inherit (lib) trim mkOption types;
in
{
_class = "nixops4Resource";
imports = [ ./sharedOptions.nix ];
options = {
nodeName = mkOption {
type = types.str;
description = ''
The name of the node in the NixOS test;
needed for recovering the node configuration to prepare its deployment.
'';
};
};
config = {
ssh = {
host = config.nodeName;
hostPublicKey = readFile (config.pathToCwd + "/${config.nodeName}_host_key.pub");
};
nixpkgs = inputs.nixpkgs;
nixos.module = {
imports = [
./targetNode.nix
(lib.modules.importJSON (config.pathToCwd + "/${config.nodeName}-network.json"))
];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = trim (readFile (config.pathToCwd + "/acme_server_ip"));
nixpkgs.hostPlatform = "x86_64-linux";
};
};
}

View file

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -1,58 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
args;
in
makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env == "" then
throw "The DEPLOYMENT environment needs to be set. You do not want to use this deployment unless in the `deployment-panel` NixOS test."
else
env
)
)

View file

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-panel = import ./deployment/check/panel/deployment.nix {
inherit inputs sources lib;
};
}
);
}

View file

@@ -1,377 +0,0 @@
{
inputs,
lib,
hostPkgs,
config,
...
}:
let
inherit (lib)
getExe
;
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "dummy";
panelPort = 8000;
panelUser = "test";
panelEmail = "test@test.com";
panelPassword = "ouiprdaaa43"; # panel's manager complains if too close to username or email
fediUser = "test";
fediEmail = "test@test.com";
fediPassword = "testtest";
fediName = "Testy McTestface";
toPythonBool = b: if b then "True" else "False";
interactWithPanel =
{
baseUri,
enableMastodon,
enablePeertube,
enablePixelfed,
}:
hostPkgs.writers.writePython3Bin "interact-with-panel"
{
libraries = with hostPkgs.python3Packages; [ selenium ];
flakeIgnore = [
"E302" # expected 2 blank lines, found 0
"E303" # too many blank lines
"E305" # expected 2 blank lines after end of function or class
"E501" # line too long (> 79 characters)
"E731" # do not assign lambda expression, use a def
];
}
''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
print("Create and configure driver...")
options = Options()
options.add_argument("--headless")
options.binary_location = "${getExe hostPkgs.firefox-unwrapped}"
service = webdriver.FirefoxService(executable_path="${getExe hostPkgs.geckodriver}")
driver = webdriver.Firefox(options=options, service=service)
driver.set_window_size(1280, 960)
driver.implicitly_wait(360)
driver.command_executor.set_timeout(3600)
print("Open login page...")
driver.get("${baseUri}/login/")
print("Enter username...")
driver.find_element(By.XPATH, "//input[@name = 'username']").send_keys("${panelUser}")
print("Enter password...")
driver.find_element(By.XPATH, "//input[@name = 'password']").send_keys("${panelPassword}")
print("Click Login button...")
driver.find_element(By.XPATH, "//button[normalize-space() = 'Login']").click()
print("Open configuration page...")
driver.get("${baseUri}/configuration/")
# Helpers to actually set and not add or switch input values.
def input_set(elt, keys):
elt.clear()
elt.send_keys(keys)
def checkbox_set(elt, new_value):
if new_value != elt.is_selected():
elt.click()
print("Enable Fediversity...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'enable']"), True)
print("Fill in initialUser info...")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.username']"), "${fediUser}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.password']"), "${fediPassword}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.email']"), "${fediEmail}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.displayName']"), "${fediName}")
print("Enable services...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})
print("Start deployment...")
driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()
print("Wait for deployment status to show up...")
get_deployment_result = lambda d: d.find_element(By.XPATH, "//div[@id = 'deployment-result']//p")
WebDriverWait(driver, timeout=3660, poll_frequency=10).until(get_deployment_result)
deployment_result = get_deployment_result(driver).get_attribute('innerHTML')
print("Quit...")
driver.quit()
match deployment_result:
case 'Deployment Succeeded':
print("Deployment has succeeded; exiting normally")
exit(0)
case 'Deployment Failed':
print("Deployment has failed; exiting with return code `1`")
exit(1)
case _:
print(f"Unexpected deployment result: {deployment_result}; exiting with return code `2`")
exit(2)
'';
in
{
_class = "nixosTest";
name = "deployment-panel";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in CLI test)
../../default.nix
../../options.nix
../../../services/fediversity
];
## The panel's module sets `nixpkgs.overlays` which clashes with
## `pkgsReadOnly`. We disable it here.
node.pkgsReadOnly = false;
nodes.deployer =
{ pkgs, ... }:
{
imports = [
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;
domain = "deployer";
secrets = {
SECRET_KEY = dummyFile;
};
port = panelPort;
deployment = {
flake = "/run/fedipanel/flake";
name = "check-deployment-panel";
};
};
environment.systemPackages = [ pkgs.expect ];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy # a configuration checker for nginx
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not type
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
nodes.client =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
httpie
dnsutils # for `dig`
openssl
cacert
wget
python3
python3Packages.selenium
firefox-unwrapped
geckodriver
];
security.pki.certificateFiles = [
config.nodes.acme.test-support.acme.caCert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The tests for the presence of the services are very simple: we only
## check that there is a systemd service of the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
## TODO: We want a nicer way to control where the FediPanel consumes its
## flake, which can default to the store but could also be somewhere else if
## someone wanted to change the code of the flake.
##
with subtest("Give the panel access to the flake"):
deployer.succeed("mkdir /run/fedipanel /run/fedipanel/flake >&2")
deployer.succeed("cp -R . /run/fedipanel/flake >&2")
deployer.succeed("chown -R panel:panel /run/fedipanel >&2")
## TODO: I want a programmatic way to provide an SSH key to the panel (and
## therefore NixOps4). This should happen either in the Python code, but
## maybe it is fair that that one picks up on the user's key? It could
## also be in the Nix packaging.
##
with subtest("Set up the panel's SSH keys"):
deployer.succeed("mkdir /home/panel/.ssh >&2")
deployer.succeed("cp -R /root/.ssh/* /home/panel/.ssh >&2")
deployer.succeed("chown -R panel:panel /home/panel/.ssh >&2")
deployer.succeed("chmod 600 /home/panel/.ssh/* >&2")
## TODO: This is a hack to accept the root CA used by Pebble on the client
## machine. Pebble randomizes everything, so the only way to get it is to
## call the /roots/0 endpoint at runtime, leaving not much margin for a nice
## Nixy way of adding the certificate. There is no way around it as this is
## by design in Pebble, showing in fact that Pebble was not the appropriate
## tool for our use and that nixpkgs does not in fact provide an easy way to
## generate _usable_ certificates in NixOS tests. I suggest we merge this,
## and track the task to set it up in a cleaner way. I would tackle this in
## a subsequent PR, and hopefully even contribute this BetterWay(tm) to
## nixpkgs. — Niols
##
with subtest("Set up ACME root CA on client"):
client.succeed("""
cd /etc/ssl/certs
curl -o pebble-root-ca.pem https://acme.test:15000/roots/0
curl -o pebble-intermediate-ca.pem https://acme.test:15000/intermediates/0
{ cat ca-bundle.crt
cat pebble-root-ca.pem
cat pebble-intermediate-ca.pem
} > new-ca-bundle.crt
rm ca-bundle.crt ca-certificates.crt
mv new-ca-bundle.crt ca-bundle.crt
ln -s ca-bundle.crt ca-certificates.crt
""")
## TODO: I would hope for a more declarative way to add users. This should
## be handled by the Nix packaging of the FediPanel. — Niols
##
with subtest("Create panel user"):
deployer.succeed("""
expect -c '
spawn manage createsuperuser --username ${panelUser} --email ${panelEmail}
expect "Password: "; send "${panelPassword}\\n";
expect "Password (again): "; send "${panelPassword}\\n"
interact
' >&2
""")
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = false;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = true;
enablePeertube = false;
enablePixelfed = true;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = true;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

View file

@@ -1,70 +0,0 @@
let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit inputs;
};
modules = [
module
./data-model.nix
];
}).config;
in
{
_class = "nix-unit";
test-eval = {
expr =
let
fediversity = eval (
{ config, ... }:
{
config = {
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options = {
enable = lib.mkEnableOption "Hello in the shell";
};
};
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
};
};
options = {
example-configuration = mkOption {
type = config.configuration;
readOnly = true;
default = {
enable = true;
applications.hello.enable = true;
};
};
};
}
);
in
{
inherit (fediversity)
example-configuration
;
};
expected = {
example-configuration = {
enable = true;
applications.hello.enable = true;
};
};
};
}

View file

@@ -1,89 +0,0 @@
{
lib,
config,
...
}:
let
inherit (lib) mkOption types;
inherit (lib.types)
attrsOf
attrTag
deferredModuleWith
submodule
optionType
functionTo
;
functionType = import ./function.nix;
application-resources = {
options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
);
};
};
in
{
_class = "nixops4Deployment";
options = {
applications = mkOption {
description = "Collection of Fediversity applications";
type = attrsOf (
submodule (application: {
_class = "fediversity-application";
options = {
description = mkOption {
description = "Description to be shown in the application overview";
type = types.str;
};
module = mkOption {
description = "Operator-facing configuration options for the application";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-application-config"; } ]; };
};
implementation = mkOption {
description = "Mapping of application configuration to deployment resources, a description of what an application needs to run";
type = application.config.config-mapping.function-type;
};
resources = mkOption {
description = "Compute resources required by an application";
type = functionTo application.config.config-mapping.output-type;
readOnly = true;
default = input: (application.config.implementation input).output;
};
config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType;
readOnly = true;
default = {
input-type = application.config.module;
output-type = application-resources;
};
};
};
})
);
};
configuration = mkOption {
description = "Configuration type declaring options to be set by operators";
type = optionType;
readOnly = true;
default = submodule {
options = {
enable = lib.mkEnableOption {
description = "your Fediversity configuration";
};
applications = lib.mapAttrs (
_name: application:
mkOption {
description = application.description;
type = submodule application.module;
default = { };
}
) config.applications;
};
};
};
};
}


@ -33,40 +33,17 @@
## information coming from the FediPanel.
##
## FIXME: keep the interface in lock-step with the definitions in the FediPanel
panelConfigNullable:
panelConfig:
let
inherit (lib) mkIf;
## The converter from module options to JSON schema does not generate proper
## JSON schema types, forcing us to use nullable fields for default values.
## However, working with those fields in the deployment code is annoying (and
## unusual for Nix programmers), so we sanitize the input here and add back
## the default value by hand.
nonNull = x: v: if x == null then v else x;
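## Illustration (hypothetical values): `nonNull null "fediversity.net"`
## evaluates to "fediversity.net", whereas `nonNull "abundos.eu" "fediversity.net"`
## evaluates to "abundos.eu".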
panelConfig = {
domain = nonNull panelConfigNullable.domain "fediversity.net";
initialUser = nonNull panelConfigNullable.initialUser {
displayName = "Testy McTestface";
username = "test";
password = "testtest";
email = "test@test.com";
};
mastodon = nonNull panelConfigNullable.mastodon { enable = false; };
peertube = nonNull panelConfigNullable.peertube { enable = false; };
pixelfed = nonNull panelConfigNullable.pixelfed { enable = false; };
};
in
## Regular arguments of a NixOps4 deployment module.
{ config, providers, ... }:
{ providers, ... }:
let
cfg = config.deployment;
in
{
_class = "nixops4Deployment";
options = {
deployment = lib.mkOption {
description = ''
@ -75,7 +52,6 @@ in
# XXX(@fricklerhandwerk):
# misusing this will produce obscure errors that will be truncated by NixOps4
type = lib.types.submodule ./options.nix;
default = panelConfig;
};
};
@ -140,9 +116,9 @@ in
{
garage-configuration = makeConfigurationResource garageConfigurationResource (
{ pkgs, ... }:
mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable) {
mkIf (panelConfig.mastodon.enable || panelConfig.peertube.enable || panelConfig.pixelfed.enable) {
fediversity = {
inherit (cfg) domain;
inherit (panelConfig) domain;
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
@ -153,14 +129,14 @@ in
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
{ pkgs, ... }:
mkIf cfg.mastodon.enable {
mkIf panelConfig.mastodon.enable {
fediversity = {
inherit (cfg) domain;
inherit (panelConfig) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
@ -174,14 +150,14 @@ in
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
{ pkgs, ... }:
mkIf cfg.peertube.enable {
mkIf panelConfig.peertube.enable {
fediversity = {
inherit (cfg) domain;
inherit (panelConfig) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
@ -197,14 +173,14 @@ in
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
{ pkgs, ... }:
mkIf cfg.pixelfed.enable {
mkIf panelConfig.pixelfed.enable {
fediversity = {
inherit (cfg) domain;
inherit (panelConfig) domain;
temp.initialUser = {
inherit (cfg.initialUser) username email displayName;
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {


@ -1,26 +1,3 @@
{ inputs, sources, ... }:
{
_class = "flake";
perSystem =
{ pkgs, ... }:
{
checks = {
deployment-basic = import ./check/basic {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-cli = import ./check/cli {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-panel = import ./check/panel {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
};
};
imports = [ ./check/basic/flake-part.nix ];
}


@ -1,37 +0,0 @@
/**
Modular function type
*/
{ config, lib, ... }:
let
inherit (lib) mkOption types;
inherit (types)
deferredModule
submodule
functionTo
optionType
;
in
{
options = {
input-type = mkOption {
type = deferredModule;
};
output-type = mkOption {
type = deferredModule;
};
function-type = mkOption {
type = optionType;
readOnly = true;
default = functionTo (submodule {
options = {
input = mkOption {
type = submodule config.input-type;
};
output = mkOption {
type = submodule config.output-type;
};
};
});
};
};
}


@ -17,8 +17,6 @@ let
inherit (lib) types mkOption;
in
{
_class = "nixops4Deployment";
options = {
enable = lib.mkEnableOption "Fediversity configuration";
domain = mkOption {

flake.lock generated (216 changed lines)

@ -59,6 +59,22 @@
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1733328505,
@ -74,7 +90,7 @@
"type": "github"
}
},
"flake-compat_3": {
"flake-compat_4": {
"flake": false,
"locked": {
"lastModified": 1696426674,
@ -127,6 +143,24 @@
}
},
"flake-parts_3": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib_3"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_4": {
"inputs": {
"nixpkgs-lib": [
"nixops4-nixos",
@ -167,12 +201,50 @@
"type": "github"
}
},
"git-hooks-nix": {
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1742649964,
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"git-hooks-nix": {
"inputs": {
"flake-compat": "flake-compat_2",
"gitignore": "gitignore_2",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1737465171,
"narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",
@ -227,6 +299,27 @@
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"gitignore_2": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
@ -248,6 +341,47 @@
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1703113217,
"narHash": "sha256-7ulcXOk63TIT2lVDSExj7XzFx09LpdSAPtvgtM7yQPE=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "3bfaacf46133c037bb356193bd2f1765d9dc82c1",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"home-manager_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1743860185,
"narHash": "sha256-TkhfJ+vH+iGxLQL6RJLObMmldAQpysVJ+p1WnnKyIeQ=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "b5e29565131802cc8adee7dccede794226da8614",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"mk-naked-shell": {
"flake": false,
"locked": {
@ -268,7 +402,7 @@
"inputs": {
"flake-compat": "flake-compat_2",
"flake-parts": "flake-parts_3",
"git-hooks-nix": "git-hooks-nix_2",
"git-hooks-nix": "git-hooks-nix",
"nixfmt": "nixfmt",
"nixpkgs": [
"nixops4-nixos",
@ -339,12 +473,30 @@
"type": "github"
}
},
"nixfmt_2": {
"inputs": {
"flake-utils": "flake-utils_2"
},
"locked": {
"lastModified": 1736283758,
"narHash": "sha256-hrKhUp2V2fk/dvzTTHFqvtOg000G1e+jyIam+D4XqhA=",
"owner": "NixOS",
"repo": "nixfmt",
"rev": "8d4bd690c247004d90d8554f0b746b1231fe2436",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixfmt",
"type": "github"
}
},
"nixops4": {
"inputs": {
"flake-parts": "flake-parts_2",
"flake-parts": "flake-parts_3",
"nix": "nix",
"nix-cargo-integration": "nix-cargo-integration",
"nixpkgs": "nixpkgs_2",
"nixpkgs": "nixpkgs_3",
"nixpkgs-old": "nixpkgs-old"
},
"locked": {
@ -363,7 +515,7 @@
},
"nixops4-nixos": {
"inputs": {
"flake-parts": "flake-parts",
"flake-parts": "flake-parts_2",
"git-hooks-nix": "git-hooks-nix",
"nixops4": "nixops4",
"nixops4-nixos": [
@ -445,6 +597,18 @@
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-lib_3": {
"locked": {
"lastModified": 1738452942,
"narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-old": {
"locked": {
"lastModified": 1735563628,
@ -478,6 +642,22 @@
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1730768919,
"narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1738410390,
"narHash": "sha256-xvTo0Aw0+veek7hvEVLzErmJyQkEcRk6PSR4zsRQFEc=",
@ -493,6 +673,22 @@
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1740463929,
"narHash": "sha256-4Xhu/3aUdCKeLfdteEHMegx5ooKQvwPHNkOgNCXQrvc=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "5d7db4668d7a0c6cc5fc8cf6ef33b008b2b1ed8b",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"parts": {
"inputs": {
"nixpkgs-lib": [
@ -518,7 +714,7 @@
},
"purescript-overlay": {
"inputs": {
"flake-compat": "flake-compat_3",
"flake-compat": "flake-compat_4",
"nixpkgs": [
"nixops4-nixos",
"nixops4",
@ -561,11 +757,15 @@
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"git-hooks": "git-hooks",
"home-manager": "home-manager_2",
"nixops4": [
"nixops4-nixos",
"nixops4"
],
"nixops4-nixos": "nixops4-nixos"
"nixops4-nixos": "nixops4-nixos",
"nixpkgs": "nixpkgs_4"
}
},
"rust-overlay": {


@ -1,42 +1,51 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11"; # consumed by flake-parts
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
inputs@{ flake-parts, ... }:
let
sources = import ./npins;
inherit (sources) git-hooks agenix;
in
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [
"${sources.git-hooks}/flake-module.nix"
(import "${git-hooks}/flake-module.nix")
inputs.nixops4.modules.flake.default
./deployment/flake-part.nix
./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
];
perSystem =
{
pkgs,
lib,
system,
inputs',
...
}:
{
checks = {
panel = (import ./. { inherit sources system; }).tests.panel.basic;
};
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
optout = [
"npins"
"**/.terraform"
];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
@ -46,7 +55,21 @@
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
devShells.default = pkgs.mkShell {
packages = [
pkgs.npins
pkgs.nil
(pkgs.callPackage "${agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
# exposing this env var as a hack to pass info in from form
(inputs'.nixops4.packages.default.overrideAttrs {
impureEnvVars = [ "DEPLOYMENT" ];
})
];
};
};
};
}
);
}


@ -2,12 +2,10 @@
let
inherit (lib) mkDefault;
in
{
_class = "nixos";
imports = [
./hardware.nix
./networking.nix
./users.nix
];
@ -23,9 +21,4 @@ in
nix.extraOptions = ''
experimental-features = nix-command flakes
'';
boot.loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
}


@ -1,20 +1,20 @@
{ ... }:
{ modulesPath, ... }:
{
_class = "nixos";
## FIXME: It would be nice, but the following leads to infinite recursion
## in the way we currently plug `sources` in.
##
# imports = [
# "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# ];
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot = {
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
initrd = {
availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];


@ -1,64 +1,63 @@
{ config, lib, ... }:
let
inherit (lib) mkDefault mkIf mkMerge;
inherit (lib) mkDefault;
in
{
_class = "nixos";
config = {
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = mkMerge [
{
networking = {
hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain;
## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true;
## Disable the default firewall and use nftables instead, with a custom
## Procolix-made ruleset.
interfaces = {
eth0 = {
ipv4 = {
addresses = [
{
inherit (config.fediversityVm.ipv4) address prefixLength;
}
];
};
ipv6 = {
addresses = [
{
inherit (config.fediversityVm.ipv6) address prefixLength;
}
];
};
};
};
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = "eth0";
};
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = "eth0";
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
firewall.enable = false;
nftables = {
enable = true;
rulesetFile = ./nftables-ruleset.nft;
};
}
## IPv4
(mkIf config.fediversityVm.ipv4.enable {
interfaces.${config.fediversityVm.ipv4.interface}.ipv4.addresses = [
{ inherit (config.fediversityVm.ipv4) address prefixLength; }
];
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = config.fediversityVm.ipv4.interface;
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
];
})
## IPv6
(mkIf config.fediversityVm.ipv6.enable {
interfaces.${config.fediversityVm.ipv6.interface}.ipv6.addresses = [
{ inherit (config.fediversityVm.ipv6) address prefixLength; }
];
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = config.fediversityVm.ipv6.interface;
};
nameservers = [
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
})
];
};
}


@ -1,13 +1,5 @@
{
config,
...
}:
{
_class = "nixos";
users.users = {
root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = {
isNormalUser = true;
extraGroups = [ "wheel" ];


@ -6,8 +6,6 @@ let
in
{
# `config` not set and imported from multiple places: no fixed module class
options.fediversityVm = {
##########################################################################
@ -20,13 +18,16 @@ in
'';
};
isFediversityVm = mkOption {
type = types.bool;
proxmox = mkOption {
type = types.nullOr (
types.enum [
"procolix"
"fediversity"
]
);
description = ''
Whether the machine is a Fediversity VM or not. This is used to
determine whether the machine should be provisioned via Proxmox or not.
Machines that are _not_ Fediversity VMs could be physical machines, or
VMs that live outside Fediversity, eg. on Procolix's Proxmox.
The Proxmox instance. This is used for provisioning only and should be
set to `null` if the machine is not a VM.
'';
};
@ -88,17 +89,6 @@ in
};
ipv4 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv4 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 4. It will be injected as a
@ -124,17 +114,6 @@ in
};
ipv6 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv6 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 6. It will be injected as a


@ -1,9 +1,6 @@
{
inputs,
lib,
config,
keys,
secrets,
...
}:
@ -11,11 +8,15 @@ let
inherit (lib) attrValues elem mkDefault;
inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix;
sources = import ../../npins;
inherit (sources) nixpkgs agenix disko home-manager;
secretsPrefix = ../../secrets;
secrets = import (secretsPrefix + "/secrets.nix");
keys = import ../../keys;
in
{
_class = "nixops4Resource";
imports = [ ./options.nix ];
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
@ -25,39 +26,39 @@ in
hostPublicKey = config.fediversityVm.hostPublicKey;
};
inherit (inputs) nixpkgs;
inherit nixpkgs;
## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else
## should go into the `./nixos` subdirectory.
nixos.module = {
imports = [
"${agenix}/modules/age.nix"
"${disko}/module.nix"
"${home-manager}/nixos"
./options.nix
./nixos
./proxmox-qemu-vm.nix
];
## Inject the shared options from the resource's `config` into the NixOS
## configuration.
fediversityVm = config.fediversityVm;
## Read all the secrets, filter the ones that are supposed to be readable with
## public key, and create a mapping from `<name>.file` to the absolute path of
## the secret's file.
## Read all the secrets, filter the ones that are supposed to be readable
## with this host's public key, and add them correctly to the configuration
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
${removeSuffix ".age" name}.file = secrets.rootPath + "/${name}";
}
) secrets.mapping;
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
})
) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo.
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
# allow our panel vm access to the test machines
keys.panel
# allow continuous deployment access
keys.cd
];
};
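To make the secrets wiring above concrete: assuming an entry `"forgejo-runner-token.age"` in `secrets.nix` whose `publicKeys` list contains this host's key, the `concatMapAttrs` call would produce roughly the following (a sketch; `secretsPrefix` points at `../../secrets`):

  age.secrets.forgejo-runner-token.file = ../../secrets/forgejo-runner-token.age;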


@ -1,7 +1,14 @@
{
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs { inherit system; },
# match the same versions we deploy locally
inputs ? import sources.flake-inputs {
root = ../.;
},
# match the same version of opentofu that is deployed by the root flake
pkgs ? import inputs.nixpkgs {
inherit system;
},
}:
let
inherit (pkgs) lib;


@ -1,9 +1,6 @@
{
inputs,
lib,
sources,
keys,
secrets,
...
}:
@ -14,38 +11,21 @@ let
mkOption
evalModules
filterAttrs
mapAttrs'
deepSeq
;
inherit (lib.attrsets) genAttrs;
sources = import ../../npins;
commonResourceModule = {
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
# flake-parts and have our own data model for how the project is organised
# internally
_module.args = {
inherit
inputs
keys
secrets
sources
;
};
## FIXME: It would be preferable to have those `sources`-related imports in
## the modules that use them. However, doing so triggers infinite recursions
## because of the way we propagate `sources`. `sources` must be propagated by
## means of `specialArgs`, but this requires a bigger change.
nixos.module.imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
"${sources.home-manager}/nixos"
];
## Given a machine's name, make a resource module,
## except for its missing provider. (Depending on the use of that resource, we
## will provide a different one.)
makeResourceModule =
{ vmName }:
{
imports = [
./common/resource.nix
./machines/${vmName}
];
fediversityVm.name = vmName;
};
## Given a list of machine names, make a deployment with those machines'
@ -59,50 +39,14 @@ let
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
commonResourceModule
../machines/dev/${vmName}
(makeResourceModule {
inherit vmName;
})
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];
## Given an attrset of test configurations (key = test machine name, value =
## NixOS configuration module), make a deployment with those machines'
## configurations as resources.
makeTestDeployment =
(import ../deployment)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../services/fediversity;
}
{
garageConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test01
];
};
mastodonConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
];
};
peertubeConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test05
];
};
pixelfedConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test04
];
};
};
nixops4ResourceNixosMockOptions = {
## NOTE: We allow the use of a few options from
## `nixops4-nixos.modules.nixops4Resource.nixos` such that we can
@ -112,90 +56,62 @@ let
## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4.
options = {
nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { };
ssh = mkOption { };
};
};
makeResourceConfig =
{ vmName, isTestVm }:
vm:
(evalModules {
modules = [
nixops4ResourceNixosMockOptions
commonResourceModule
(if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
(makeResourceModule vm)
];
}).config;
## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration =
isTestVm: vmName:
import "${sources.nixpkgs}/nixos" {
configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
system = "x86_64-linux";
vmName:
let
inherit (sources) nixpkgs;
in
import "${nixpkgs}/nixos" {
modules = [
(makeResourceConfig { inherit vmName; }).nixos.module
];
};
makeVmOptions =
isTestVm: vmName:
let
config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
in
if config.isFediversityVm then
{
inherit (config)
makeVmOptions = vmName: {
inherit ((makeResourceConfig { inherit vmName; }).fediversityVm)
proxmox
vmId
description
sockets
cores
memory
diskSize
hostPublicKey
unsafeHostPrivateKey
;
}
else
null;
};
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev;
nixosConfigurations =
genAttrs machines (makeConfiguration false);
vmOptions =
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
(genAttrs machines (makeVmOptions false));
machines = listSubdirectories ./machines;
in
{
_class = "flake";
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment.
## - We add a “default” deployment with all normal machines.
## - We add a “test” deployment with all test machines.
## - Each machine gets a NixOS configuration.
## - Each machine gets a VM options entry.
## - Each machine gets a deployment.
## - We add a “default” deployment with all infra machines.
nixops4Deployments = genAttrs machines makeDeployment' // {
default = makeDeployment machines;
};
flake = { inherit nixosConfigurations vmOptions; };
perSystem =
{ pkgs, ... }:
{
checks =
mapAttrs' (name: nixosConfiguration: {
name = "nixosConfigurations-${name}";
value = nixosConfiguration.config.system.build.toplevel;
}) nixosConfigurations
// mapAttrs' (name: vmOptions: {
name = "vmOptions-${name}";
## Check that the VM options build/evaluate correctly. `deepSeq e1
## e2` evaluates `e1` strictly in depth before returning `e2`. We
## use this trick because checks need to be derivations, which VM
## options are not.
value = deepSeq vmOptions pkgs.hello;
}) vmOptions;
};
flake.nixosConfigurations = genAttrs machines makeConfiguration;
flake.vmOptions = genAttrs machines makeVmOptions;
}
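The `deepSeq` trick used for the `vmOptions-*` checks is easy to see in isolation. A minimal sketch with illustrative values, using `pkgs.hello` as the stand-in derivation:

  let
    vmOptions = { vmId = 200; memory = 4096; };
  in
  ## Force every attribute of `vmOptions` to evaluate; if none of them throws,
  ## the expression reduces to the `pkgs.hello` derivation, satisfying the
  ## requirement that checks be derivations.
  builtins.deepSeq vmOptions pkgs.hello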


@ -7,10 +7,9 @@ Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description
--------|---------|-------------
[`fedi200`](./dev/fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./dev/fedi201) | fediversity | FediPanel
[`vm02116`](./dev/vm02116) | procolix | Forgejo
[`vm02187`](./dev/vm02187) | procolix | Wiki
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
[`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./vm02187) | procolix | Wiki
This table excludes all machines with names starting with `test`.


@ -20,7 +20,7 @@ vmOptions=$(
cd ..
nix eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ../.)).vmOptions
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
" \
--log-format raw --quiet
)
@ -32,12 +32,11 @@ for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)
# shellcheck disable=SC2016
printf '[`%s`](./dev/%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
fi
done
cat <<\EOF
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
This table excludes all machines with names starting with `test`.
EOF


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "fedi200";
isFediversityVm = true;
vmId = 200;
proxmox = "fediversity";
description = "Testing machine for Hans";
domain = "abundos.eu";


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "fedi201";
isFediversityVm = true;
vmId = 201;
proxmox = "fediversity";
description = "FediPanel";
domain = "abundos.eu";


@ -6,8 +6,6 @@ let
name = "panel";
in
{
_class = "nixos";
imports = [
(import ../../../panel { }).module
];


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "vm02116";
isFediversityVm = false;
vmId = 2116;
proxmox = "procolix";
description = "Forgejo";
ipv4.address = "185.206.232.34";


@ -5,8 +5,6 @@ let
in
{
_class = "nixos";
services.forgejo = {
enable = true;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "vm02187";
isFediversityVm = false;
vmId = 2187;
proxmox = "procolix";
description = "Wiki";
ipv4.address = "185.206.232.187";


@ -1,8 +1,6 @@
{ config, ... }:
{
_class = "nixos";
services.phpfpm.pools.mediawiki.phpOptions = ''
upload_max_filesize = 1024M;
post_max_size = 1024M;


@ -1,5 +1,5 @@
{
"domain": "fediversity.net",
"domain": "abundos.eu",
"mastodon": { "enable": false },
"peertube": { "enable": false },
"pixelfed": { "enable": false },


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test01";
isFediversityVm = true;
vmId = 7001;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test02";
isFediversityVm = true;
vmId = 7002;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test03";
isFediversityVm = true;
vmId = 7003;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test04";
isFediversityVm = true;
vmId = 7004;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test05";
isFediversityVm = true;
vmId = 7005;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test06";
isFediversityVm = true;
vmId = 7006;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test11";
isFediversityVm = true;
vmId = 7011;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test12";
isFediversityVm = true;
vmId = 7012;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test13";
isFediversityVm = true;
vmId = 7013;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1,10 +1,7 @@
{
_class = "nixops4Resource";
fediversityVm = {
name = "test14";
isFediversityVm = true;
vmId = 7014;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;


@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlsYTtMx3hFO8B5B8iHaXL2JKj9izHeC+/AMhIWXBPs cd-age


@ -35,5 +35,4 @@ in
contributors = collectKeys ./contributors;
systems = collectKeys ./systems;
panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub);
cd = removeTrailingWhitespace (readFile ./cd-ssh-key.pub);
}


@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.keys = import ./.;
}


@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFXQW5fxJoNY9wtTMsNExgbAbvyljIRGBLjY+USh/0A


@ -1,4 +0,0 @@
# Machines
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.


@ -1,72 +0,0 @@
{ lib, ... }:
let
inherit (lib) mkDefault mkForce;
in
{
_class = "nixops4Resource";
# NOTE: This needs an SSH config entry `forgejo-ci` to locate and access the
# machine. This is because different people access the machine in different
# ways (eg. via a proxy vs. via Procolix's VPN). This might look like:
#
# Host forgejo-ci
# HostName 45.142.234.216
# HostKeyAlias forgejo-ci
#
# The `HostKeyAlias` statement is crucial. Without it, deployment will fail
# with the SSH error “Host key verification failed”.
ssh.host = mkForce "forgejo-ci";
fediversityVm = {
name = "forgejo-ci";
domain = "procolix.com";
isFediversityVm = false;
ipv4 = {
interface = "enp1s0f0";
address = "192.168.201.65";
prefixLength = 24;
gateway = "192.168.201.1";
};
ipv6.enable = false;
};
nixos.module =
{ config, ... }:
{
_class = "nixos";
imports = [
./forgejo-actions-runner.nix
];
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
networking = {
nftables.enable = mkForce false;
hostId = "1d6ea552";
};
## NOTE: This is a physical machine, so is not covered by disko
fileSystems."/" = lib.mkForce {
device = "rpool/root";
fsType = "zfs";
};
fileSystems."/home" = {
device = "rpool/home";
fsType = "zfs";
};
fileSystems."/boot" = lib.mkForce {
device = "/dev/disk/by-uuid/50B2-DD3F";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
];
};
};
}


@ -1,47 +0,0 @@
{ pkgs, config, ... }:
{
_class = "nixos";
services.gitea-actions-runner = {
package = pkgs.forgejo-actions-runner;
instances.default = {
enable = true;
name = config.networking.fqdn;
url = "https://git.fediversity.eu";
tokenFile = config.age.secrets.forgejo-runner-token.path;
settings = {
log.level = "info";
runner = {
file = ".runner";
# Take only 1 job at a time to avoid clashing NixOS tests, see #362
capacity = 1;
timeout = "3h";
insecure = false;
fetch_timeout = "5s";
fetch_interval = "2s";
};
};
## This runner supports Docker (with a default Ubuntu image) and native
## modes. In native mode, it contains a few default packages.
labels = [
"docker:docker://node:16-bullseye"
"native:host"
];
hostPackages = with pkgs; [
bash
git
nix
nodejs
];
};
};
## For the Docker mode of the runner.
virtualisation.docker.enable = true;
}


@ -1,54 +0,0 @@
## This file contains a tweak of flake-parts's `mkFlake` function to splice in
## sources taken from npins.
## NOTE: Much of the logic in this file feels like it should not be super
## specific to fediversity. Could it make sense to extract the core of this to
## another place it feels closer to in spirit, such as @fricklerhandwerk's
## flake-inputs (which this code already depends on anyway, and which already
## contained two distinct helpers for migrating away from flakes)? cf
## https://git.fediversity.eu/Fediversity/Fediversity/pulls/447#issuecomment-8671
inputs@{ self, ... }:
let
sources = import ./npins;
inherit (import sources.flake-inputs) import-flake;
# XXX(@fricklerhandwerk): this atrocity is required to splice in a foreign Nixpkgs via flake-parts
# XXX - this is just importing a flake
nixpkgs = import-flake { src = sources.nixpkgs; };
# XXX - this overrides the inputs attached to `self`
inputs' = self.inputs // {
nixpkgs = nixpkgs;
};
self' = self // {
inputs = inputs';
};
flake-parts-lib = import "${sources.flake-parts}/lib.nix" { inherit (nixpkgs) lib; };
in
flakeModule:
flake-parts-lib.mkFlake
{
# XXX - finally we override the overall set of `inputs` -- we need both:
# flake-parts obtains `nixpkgs` from `self.inputs` and not from `inputs`.
inputs = inputs // {
inherit nixpkgs;
};
self = self';
specialArgs = {
inherit sources;
};
}
{
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [ flakeModule ];
}
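As a usage sketch, the call shape matches the one in this repository's flake.nix (module body elided):

  outputs =
    inputs:
    import ./mkFlake.nix inputs (
      { inputs, sources, ... }:
      {
        ## flake-parts module body; the npins `sources` arrive via `specialArgs`
      }
    );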


@ -36,26 +36,23 @@
"version_upper_bound": null,
"release_prefix": null,
"submodules": false,
"version": "v1.12.0",
"revision": "7121f74b976481bc36877abaf52adab2a178fcbe",
"url": "https://api.github.com/repos/nix-community/disko/tarball/v1.12.0",
"hash": "0wbx518d2x54yn4xh98cgm65wvj0gpy6nia6ra7ns4j63hx14fkq"
"version": "v1.11.0",
"revision": "cdf8deded8813edfa6e65544f69fdd3a59fa2bb4",
"url": "https://api.github.com/repos/nix-community/disko/tarball/v1.11.0",
"hash": "13brimg7z7k9y36n4jc1pssqyw94nd8qvgfjv53z66lv4xkhin92"
},
"flake-inputs": {
"type": "GitRelease",
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "fricklerhandwerk",
"repo": "flake-inputs"
},
"pre_releases": false,
"version_upper_bound": null,
"release_prefix": null,
"branch": "main",
"submodules": false,
"version": "4.1",
"revision": "ad02792f7543754569fe2fd3d5787ee00ef40be2",
"url": "https://api.github.com/repos/fricklerhandwerk/flake-inputs/tarball/4.1",
"hash": "1j57avx2mqjnhrsgq3xl7ih8v7bdhz1kj3min6364f486ys048bm"
"revision": "559574c9cbb8af262f3944b67d60fbf0f6ad03c3",
"url": "https://github.com/fricklerhandwerk/flake-inputs/archive/559574c9cbb8af262f3944b67d60fbf0f6ad03c3.tar.gz",
"hash": "0gbhmp6x2vdzvfnsvqzal3g8f8hx2ia6r73aibc78kazf78m67x6"
},
"flake-parts": {
"type": "Git",
@ -96,19 +93,6 @@
"url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz",
"hash": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w"
},
"home-manager": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "home-manager"
},
"branch": "master",
"submodules": false,
"revision": "863842639722dd12ae9e37ca83bcb61a63b36f6c",
"url": "https://github.com/nix-community/home-manager/archive/863842639722dd12ae9e37ca83bcb61a63b36f6c.tar.gz",
"hash": "0rw9n8d4v87pzlmw7ws15f0sldb51fd9528skpbzmrzl4pinsgij"
},
"htmx": {
"type": "GitRelease",
"repository": {

panel/.gitignore vendored (2 changed lines)

@ -11,5 +11,5 @@ db.sqlite3
src/db.sqlite3
src/static
src/panel/static/htmx*
src/panel/configuration/schema.*
src/panel/configuration/schema.py
.credentials


@ -20,15 +20,13 @@ in
packages = [
pkgs.npins
manage
# NixOps4 and its dependencies
pkgs.nixops4
pkgs.nix
pkgs.openssh
];
env = {
DEPLOYMENT_FLAKE = toString ../.;
DEPLOYMENT_NAME = "test";
env =
let
inherit (builtins) toString;
in
import ./env.nix { inherit lib pkgs; }
// {
NPINS_DIRECTORY = toString ../npins;
CREDENTIALS_DIRECTORY = toString ./.credentials;
DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3";
@ -47,7 +45,7 @@ in
'';
};
module = ./nix/configuration.nix;
module = import ./nix/configuration.nix;
tests = pkgs.callPackage ./nix/tests.nix { };
# re-export inputs so they can be overridden granularly


@ -23,7 +23,7 @@ let
cfg = config.services.${name};
package = pkgs.callPackage ./package.nix { };
environment = {
environment = import ../env.nix { inherit lib pkgs; } // {
DATABASE_URL = "sqlite:////var/lib/${name}/db.sqlite3";
USER_SETTINGS_FILE = pkgs.concatText "configuration.py" [
((pkgs.formats.pythonVars { }).generate "settings.py" cfg.settings)
@ -75,8 +75,6 @@ in
# https://git.dgnum.eu/mdebray/djangonix/
# unlicensed at the time of writing, but surely worth taking some inspiration from...
{
_class = "nixos";
options.services.${name} = {
enable = mkEnableOption "Service configuration for `${name}`";
production = mkOption {
@ -136,18 +134,6 @@ in
type = types.attrsOf types.path;
default = { };
};
nixops4Package = mkOption {
type = types.package;
description = ''
A package providing NixOps4.
TODO: This should not be at the level of the NixOS module, but instead
at the level of the panel's package. Until one finds a way to grab
NixOps4 from the package's npins-based code, we will have to do with
this workaround.
'';
default = pkgs.nixops4;
};
};
config = mkIf cfg.enable {
@ -161,19 +147,7 @@ in
${cfg.domain} =
{
locations = {
"/" = {
proxyPass = "http://localhost:${toString cfg.port}";
extraConfig = ''
## FIXME: The following is necessary because /deployment/status
## can take aaaaages to respond. I think this is horrendous
## design from the panel and should be changed there, but in the
## meantime we bump nginx's timeouts to one hour.
proxy_connect_timeout 3600;
proxy_send_timeout 3600;
proxy_read_timeout 3600;
send_timeout 3600;
'';
};
"/".proxyPass = "http://localhost:${toString cfg.port}";
"/static/".alias = "/var/lib/${name}/static/";
};
}
@ -184,7 +158,6 @@ in
};
};
# needed to place a config file with home-manager
users.users.${name}.isNormalUser = true;
users.groups.${name} = { };
@ -196,11 +169,6 @@ in
pkgs.openssh
python-environment
manage-service
## NixOps4 and its dependencies
cfg.nixops4Package
pkgs.nix
pkgs.openssh
];
preStart = ''
# Auto-migrate on first run or if the package has changed

Some files were not shown because too many files have changed in this diff.