Compare commits

1 commit

f88ca6e646 · add proxmox repro test · 2025-08-02 22:31:56 +02:00
143 changed files with 815 additions and 2090 deletions

View file

@@ -1,24 +0,0 @@
name: deploy-infra
on:
workflow_dispatch: # allows manual triggering
push:
branches:
- main
jobs:
deploy:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up SSH key for age secrets and SSH
run: |
env
mkdir -p ~/.ssh
echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
- name: Deploy
run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'

View file

@@ -15,29 +15,17 @@ jobs:
       - uses: actions/checkout@v4
       - run: nix-build -A tests
-  check-data-model:
-    runs-on: native
-    steps:
-      - uses: actions/checkout@v4
-      - run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
-  check-mastodon:
-    runs-on: native
-    steps:
-      - uses: actions/checkout@v4
-      - run: nix build .#checks.x86_64-linux.test-mastodon-service -L
   check-peertube:
     runs-on: native
     steps:
       - uses: actions/checkout@v4
-      - run: nix build .#checks.x86_64-linux.test-peertube-service -L
+      - run: cd services && nix-build -A tests.peertube
   check-panel:
     runs-on: native
     steps:
       - uses: actions/checkout@v4
-      - run: nix-build -A tests.panel
+      - run: cd panel && nix-build -A tests
   check-proxmox-basic:
     runs-on: native
@@ -56,35 +44,3 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - run: nix build .#checks.x86_64-linux.deployment-cli -L
-  check-deployment-panel:
-    runs-on: native
-    steps:
-      - uses: actions/checkout@v4
-      - run: nix build .#checks.x86_64-linux.deployment-panel -L
-  ## NOTE: NixOps4 does not provide a good “dry run” mode, so we instead check
-  ## proxies for resources, namely whether their `.#vmOptions.<machine>` and
-  ## `.#nixosConfigurations.<machine>` outputs evaluate and build correctly, and
-  ## whether we can dry run `infra/proxmox-*.sh` on them. This will not catch
-  ## everything, and in particular not issues in how NixOps4 wires up the
-  ## resources, but that is still something.
-  check-resources:
-    runs-on: native
-    steps:
-      - uses: actions/checkout@v4
-      - run: |
-          set -euC
-          echo ==================== [ VM Options ] ====================
-          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).vmOptions)')
-          for machine in $machines; do
-            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
-            nix build .#checks.x86_64-linux.vmOptions-$machine
-          done
-          echo
-          echo ==================== [ NixOS Configurations ] ====================
-          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).nixosConfigurations)')
-          for machine in $machines; do
-            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
-            nix build .#checks.x86_64-linux.nixosConfigurations-$machine
-          done

View file

@@ -1,24 +0,0 @@
name: update-dependencies
on:
workflow_dispatch: # allows manual triggering
# FIXME: re-enable when manual run works
# schedule:
# - cron: '0 0 1 * *' # monthly
jobs:
lockfile:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update pins
run: nix-shell --run "npins --verbose update"
- name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
with:
remote-instance-api-version: v1
token: "${{ secrets.DEPLOY_KEY }}"
branch: npins-update
commit-message: "npins: update sources"
title: "npins: update sources"

View file

@@ -14,12 +14,6 @@ There already exist solutions for self-hosting, but they're not suitable for wha
 The ones we're aware of require substantial technical knowledge and time commitment by operators, especially for scaling to thousands of users.
 Not everyone has the expertise and time to run their own server.

-## Interactions
-
-To reach these goals, we aim to implement the following interactions between [actors](#actors) (depicted with rounded corners) and system components (see the [glossary](#glossary), depicted with rectangles).
-
-![](https://git.fediversity.eu/Fediversity/meta/raw/branch/main/architecture-docs/interactions.svg)
-
 ## Actors

 - Fediversity project team
@@ -63,11 +57,11 @@ To reach these goals, we aim to implement the following interactions between [ac
 - [Fediverse](https://en.wikipedia.org/wiki/Fediverse)

-  A collection of social networking applications that can communicate with each other using a common protocol.
+  A collection of social networking services that can communicate with each other using a common protocol.

-- Application
+- Service

-  User-facing software (e.g. from Fediverse) run by the hosting provider for an operator.
+  A Fediverse application run by the hosting provider for an operator.

 - Configuration
@@ -79,11 +73,11 @@ To reach these goals, we aim to implement the following interactions between [ac
   Make a resource, such as a virtual machine, available for use.

-  > Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for applications run by operators.
+  > Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for services run by operators.

 - Deploy

-  Put software, such as applications, onto computers.
+  Put software, such as services, onto computers.
   The software includes technical configuration that links software components.
   Most user-facing configuration remains untouched by the deployment process.
@@ -91,7 +85,7 @@ To reach these goals, we aim to implement the following interactions between [ac
 - Migrate

-  Move service configurations and deployment state, including user data, from one hosting provider to another.
+  Move service configurations and user data to a different hosting provider.

 - [NixOps4](https://github.com/nixops4/nixops4)
@@ -109,18 +103,6 @@ To reach these goals, we aim to implement the following interactions between [ac
   > Example: We need a resource provider for obtaining deployment secrets from a database.

-- Runtime backend
-
-  A type of digital environment one can run operating systems such as NixOS on, e.g. bare-metal, a hypervisor, or a container runtime.
-
-- Runtime environment
-
-  The thing a deployment runs on, an interface against which the deployment is working. See runtime backend.
-
-- Runtime config
-
-  Configuration logic specific to a runtime backend, e.g. how to deploy, how to access object storage.
-
 ## Development

 All the code made for this project is freely licenced under [EUPL](https://en.m.wikipedia.org/wiki/European_Union_Public_Licence).
@@ -154,3 +136,6 @@ details as to what they are for. As an overview:
 - [`services/`](./services) contains our effort to make Fediverse applications
   work seemlessly together in our specific setting.
+- [`website/`](./website) contains the framework and the content of [the
+  Fediversity website](https://fediversity.eu/)

View file

@@ -10,9 +10,6 @@ let
     gitignore
     ;
   inherit (pkgs) lib;
-  inherit (import sources.flake-inputs) import-flake;
-  inherit ((import-flake { src = ./.; }).inputs) nixops4;
-  panel = import ./panel { inherit sources system; };
   pre-commit-check =
     (import "${git-hooks}/nix" {
       inherit nixpkgs system;
@@ -44,35 +41,10 @@ in
   shell = pkgs.mkShellNoCC {
     inherit (pre-commit-check) shellHook;
     buildInputs = pre-commit-check.enabledPackages;
-    packages =
-      let
-        test-loop = pkgs.writeShellApplication {
-          name = "test-loop";
-          runtimeInputs = [
-            pkgs.watchexec
-            pkgs.nix-unit
-          ];
-          text = ''
-            watchexec -w ${builtins.toString ./.} -- nix-unit ${builtins.toString ./deployment/data-model-test.nix} "$@"
-          '';
-        };
-      in
-      [
-        pkgs.npins
-        pkgs.nil
-        (pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
-        pkgs.openssh
-        pkgs.httpie
-        pkgs.jq
-        pkgs.nix-unit
-        test-loop
-        nixops4.packages.${system}.default
-      ];
   };

   tests = {
     inherit pre-commit-check;
-    panel = panel.tests;
   };

   # re-export inputs so they can be overridden granularly

View file

@@ -3,13 +3,6 @@
 This directory contains work to generate a full Fediversity deployment from a minimal configuration.
 This is different from [`../services/`](../services) that focuses on one machine, providing a polished and unified interface to different Fediverse services.

-## Data model
-
-The core piece of the project is the [Fediversity data model](./data-model.nix), which describes all entities and their interactions.
-What can be done with it is exemplified in the [evaluation tests](./data-model-test.nix).
-Run `test-loop` in the development environment when hacking on the data model or adding tests.
-
 ## Checks

 There are three levels of deployment checks: `basic`, `cli`, `panel`.
@@ -116,8 +109,8 @@ flowchart LR
     target_machines -->|get certs| acme
 ```

-### Service deployment check from the FediPanel
+### [WIP] Service deployment check from the panel

-This is a full deployment check running the [FediPanel](../panel) on the deployer machine, deploying some services through it and checking that they are indeed on the target machines, then cleans them up and checks whether that works, too.
+This is a full deployment check running the panel on the deployer machine, deploying some services through the panel and checking that they are indeed on the target machines, then cleans them up and checks whether that works, too.

-It builds upon the basic and CLI deployment checks, the only difference being that `deployer` runs NixOps4 only indirectly via the panel, and the `client` node is the one that triggers the deployment via a browser, the way a human would.
+It builds upon the basic and CLI deployment checks.
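Judging from the CI workflow earlier in this diff, each level is exposed as a flake check, so an invocation along the lines of `nix build .#checks.x86_64-linux.deployment-basic -L` (or, likewise, `deployment-cli`) should reproduce a given check locally.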

View file

@@ -1,8 +0,0 @@
{
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
}

View file

@@ -1,14 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}

View file

@@ -1,36 +0,0 @@
{
inputs,
sources,
lib,
providers,
...
}:
let
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
in
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = lib.genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
}

View file

@@ -0,0 +1,54 @@
{
self,
inputs,
lib,
...
}:
let
inherit (lib) genAttrs;
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = /. + (builtins.unsafeDiscardStringContext self);
pathFromRoot = ./.;
in
{
perSystem =
{ pkgs, ... }:
{
checks.deployment-basic = pkgs.testers.runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args.inputs = inputs;
inherit targetMachines pathToRoot pathFromRoot;
};
};
nixops4Deployments.check-deployment-basic =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args.inputs = inputs;
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
};
}
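A note on the `pathToRoot = /. + (builtins.unsafeDiscardStringContext self)` line in the module above: `self` coerces to the flake's store path, a string that carries context, and a string with store-path context cannot simply be appended to a path. Dropping the context first gives back a plain path value that `lib.fileset` can work with. A minimal, runnable sketch of the idiom (the `self` stand-in here is hypothetical, not the flake's real one):

```nix
let
  # Hypothetical stand-in for a flake's `self`; a real flake coerces to
  # its /nix/store path via `outPath`.
  self = { outPath = ./.; };
  selfPath = builtins.toString self;                  # coerced to a string
  raw = builtins.unsafeDiscardStringContext selfPath; # same text, context dropped
in
/. + raw # a plain path value again, safe to hand to lib.fileset.toSource
```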

View file

@@ -1,22 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-basic = {
imports = [ ./deployment/check/basic/deployment.nix ];
_module.args = { inherit inputs sources; };
};
}
);
}

View file

@@ -1,15 +1,8 @@
-{ inputs, lib, ... }:
+{ inputs, ... }:
 {
-  _class = "nixosTest";
-
   name = "deployment-basic";

-  sourceFileset = lib.fileset.unions [
-    ./constants.nix
-    ./deployment.nix
-  ];
-
   nodes.deployer =
     { pkgs, ... }:
     {
@@ -17,12 +10,6 @@
         inputs.nixops4.packages.${pkgs.system}.default
       ];

-      # FIXME: sad times
-      system.extraDependencies = with pkgs; [
-        jq
-        jq.inputDerivation
-      ];
-
       system.extraDependenciesFromModule =
         { pkgs, ... }:
         {

View file

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -0,0 +1 @@
## This is a placeholder file. It will be overwritten by the test.

View file

@@ -1,59 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
}

View file

@@ -0,0 +1,87 @@
{
self,
inputs,
lib,
...
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = /. + (builtins.unsafeDiscardStringContext self);
pathFromRoot = ./.;
enableAcme = true;
in
{
perSystem =
{ pkgs, ... }:
{
checks.deployment-cli = pkgs.testers.runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args.inputs = inputs;
inherit
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
};
};
nixops4Deployments =
let
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args.inputs = inputs;
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
};
}

View file

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments = import ./deployment/check/cli/deployments.nix {
inherit inputs sources lib;
};
}
);
}

View file

@@ -1,9 +1,4 @@
-{
-  inputs,
-  hostPkgs,
-  lib,
-  ...
-}:
+{ inputs, hostPkgs, ... }:

 let
   ## Some places need a dummy file that will in fact never be used. We create
@@ -12,25 +7,8 @@ let
 in
 {
-  _class = "nixosTest";
-
   name = "deployment-cli";

-  sourceFileset = lib.fileset.unions [
-    ./constants.nix
-    ./deployments.nix
-
-    # REVIEW: I would like to be able to grab all of `/deployment` minus
-    # `/deployment/check`, but I can't because there is a bunch of other files
-    # in `/deployment`. Maybe we can think of a reorg making things more robust
-    # here? (comment also in panel test)
-    ../../default.nix
-    ../../options.nix
-    ../../configuration.sample.json
-    ../../../services/fediversity
-  ];
-
   nodes.deployer =
     { pkgs, ... }:
     {
@@ -101,16 +79,10 @@ in
   ## and check that they are working properly.
   extraTestScript = ''
-    with subtest("Check the status of the services - there should be none"):
-        garage.fail("systemctl status garage.service")
-        mastodon.fail("systemctl status mastodon-web.service")
-        peertube.fail("systemctl status peertube.service")
-        pixelfed.fail("systemctl status phpfpm-pixelfed.service")
-
     with subtest("Run deployment with no services enabled"):
         deployer.succeed("nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive 1>&2")

-    with subtest("Check the status of the services - there should still be none"):
+    with subtest("Check the status of the services - there should be none"):
         garage.fail("systemctl status garage.service")
         mastodon.fail("systemctl status mastodon-web.service")
         peertube.fail("systemctl status peertube.service")

View file

@@ -3,7 +3,6 @@
   lib,
   pkgs,
   config,
-  sources,
   ...
 }:
@@ -17,8 +16,6 @@ let
 in
 {
-  _class = "nixos";
-
   imports = [ ./sharedOptions.nix ];

   options.system.extraDependenciesFromModule = mkOption {
@@ -54,13 +51,11 @@ in
   system.extraDependencies =
     [
-      inputs.nixops4
-      inputs.nixops4-nixos
-      inputs.nixpkgs
-
-      sources.flake-parts
-      sources.flake-inputs
-      sources.git-hooks
+      "${inputs.flake-parts}"
+      "${inputs.flake-parts.inputs.nixpkgs-lib}"
+      "${inputs.nixops4}"
+      "${inputs.nixops4-nixos}"
+      "${inputs.nixpkgs}"

       pkgs.stdenv
       pkgs.stdenvNoCC
@@ -77,7 +72,7 @@ in
       config.system.extraDependenciesFromModule
       {
         nixpkgs.hostPlatform = "x86_64-linux";
-        _module.args = { inherit inputs sources; };
+        _module.args.inputs = inputs;
         enableAcme = config.enableAcme;
         acmeNodeIP = config.acmeNodeIP;
       }

View file

@@ -3,7 +3,6 @@
   lib,
   config,
   hostPkgs,
-  sources,
   ...
 }:
@@ -13,7 +12,6 @@ let
     toJSON
     ;
   inherit (lib)
-    types
     fileset
     mkOption
     genAttrs
@@ -28,6 +26,14 @@ let
   forConcat = xs: f: concatStringsSep "\n" (map f xs);

+  ## The whole repository, with the flake at its root.
+  ## FIXME: We could probably have fileset be the union of ./. with flake.nix
+  ## and flake.lock - I doubt we need anything else.
+  src = fileset.toSource {
+    fileset = config.pathToRoot;
+    root = config.pathToRoot;
+  };
+
   ## We will need to override some inputs by the empty flake, so we make one.
   emptyFlake = runCommandNoCC "empty-flake" { } ''
     mkdir $out
@@ -36,8 +42,6 @@ in
 in
 {
-  _class = "nixosTest";
-
   imports = [
     ./sharedOptions.nix
   ];
@@ -46,47 +50,15 @@ in
     ## FIXME: I wish I could just use `testScript` but with something like
     ## `mkOrder` to put this module's string before something else.
     extraTestScript = mkOption { };
-
-    sourceFileset = mkOption {
-      ## FIXME: grab `lib.types.fileset` from NixOS, once upstreaming PR
-      ## https://github.com/NixOS/nixpkgs/pull/428293 lands.
-      type = types.mkOptionType {
-        name = "fileset";
-        description = "fileset";
-        descriptionClass = "noun";
-        check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
-        merge = (_: defs: fileset.unions (map (x: x.value) defs));
-      };
-      description = ''
-        A fileset that will be copied to the deployer node in the current
-        working directory. This should contain all the files that are
-        necessary to run that particular test, such as the NixOS
-        modules necessary to evaluate a deployment.
-      '';
-    };
   };

   config = {
-    sourceFileset = fileset.unions [
-      # NOTE: not the flake itself; it will be overridden.
-      ../../../mkFlake.nix
-      ../../../flake.lock
-      ../../../npins
-      ./sharedOptions.nix
-      ./targetNode.nix
-      ./targetResource.nix
-      (config.pathToCwd + "/flake-under-test.nix")
-    ];
-
-    acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
-
     nodes =
       {
         deployer = {
           imports = [ ./deployerNode.nix ];
-          _module.args = { inherit inputs sources; };
+          _module.args.inputs = inputs;
           enableAcme = config.enableAcme;
           acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
         };
@@ -113,7 +85,7 @@ in
       genAttrs config.targetMachines (_: {
         imports = [ ./targetNode.nix ];
-        _module.args = { inherit inputs sources; };
+        _module.args.inputs = inputs;
         enableAcme = config.enableAcme;
         acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
       });
@@ -127,16 +99,8 @@ in
         ${n}.wait_for_unit("multi-user.target")
       '')}

-      ## A subset of the repository that is necessary for this test. It will be
-      ## copied inside the test. The smaller this set, the faster our CI, because we
-      ## won't need to re-run when things change outside of it.
       with subtest("Unpacking"):
-          deployer.succeed("cp -r --no-preserve=mode ${
-            fileset.toSource {
-              root = ../../..;
-              fileset = config.sourceFileset;
-            }
-          }/* .")
+          deployer.succeed("cp -r --no-preserve=mode ${src}/* .")

       with subtest("Configure the network"):
         ${forConcat config.targetMachines (
@@ -154,6 +118,7 @@ in
       with subtest("Configure the deployer key"):
           deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
           deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
+          deployer.succeed(f"echo '{deployer_key}' > ${config.pathFromRoot}/deployer.pub")
       ${forConcat config.targetMachines (tm: ''
           ${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
       '')}
@@ -166,16 +131,11 @@ in
       ## NOTE: This is super slow. It could probably be optimised in Nix, for
       ## instance by allowing to grab things directly from the host's store.
-      ##
-      ## NOTE: We use the repository as-is (cf `src` above), overriding only
-      ## `flake.nix` by our `flake-under-test.nix`. We also override the flake
-      ## lock file to use locally available inputs, as we cannot download them.
-      ##
-      with subtest("Override the flake and its lock"):
-          deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
+      with subtest("Override the lock"):
           deployer.succeed("""
            nix flake lock --extra-experimental-features 'flakes nix-command' \
              --offline -v \
+             --override-input flake-parts ${inputs.flake-parts} \
              --override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
              \
              --override-input nixops4-nixos ${inputs.nixops4-nixos} \
@@ -187,6 +147,9 @@ in
                inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
              } \
              --override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
+             \
+             --override-input nixpkgs ${inputs.nixpkgs} \
+             --override-input git-hooks ${inputs.git-hooks} \
              ;
          """)

View file

@@ -11,7 +11,6 @@ let
   inherit (lib) mkOption types;
 in
-# `config` not set and imported from multiple places: no fixed module class
 {
   options = {
     targetMachines = mkOption {

View file

@@ -12,8 +12,6 @@ let
 in
 {
-  _class = "nixos";
-
   imports = [
     (modulesPath + "/profiles/qemu-guest.nix")
     (modulesPath + "/../lib/testing/nixos-test-base.nix")
@@ -52,16 +50,13 @@ in
       };

       security.pki.certificateFiles = [
-        ## NOTE: This certificate is the one used by the Pebble HTTPS server.
-        ## This is NOT the root CA of the Pebble server. We do add it here so
-        ## that Pebble clients can talk to its API, but this will not allow
-        ## those machines to verify generated certificates.
         testCerts.ca.cert
       ];

       ## FIXME: it is a bit sad that all this logistics is necessary. look into
       ## better DNS stuff
       networking.extraHosts = "${config.acmeNodeIP} acme.test";
     })
   ];
 }

View file

@@ -2,7 +2,6 @@
   inputs,
   lib,
   config,
-  sources,
   ...
 }:
@@ -13,8 +12,6 @@ let
 in
 {
-  _class = "nixops4Resource";
-
   imports = [ ./sharedOptions.nix ];

   options = {
@@ -41,7 +38,7 @@ in
         (lib.modules.importJSON (config.pathToCwd + "/${config.nodeName}-network.json"))
       ];

-      _module.args = { inherit inputs sources; };
+      _module.args.inputs = inputs;
       enableAcme = config.enableAcme;
       acmeNodeIP = trim (readFile (config.pathToCwd + "/acme_server_ip"));

View file

@@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -1,58 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
args;
in
makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env == "" then
throw "The DEPLOYMENT environment needs to be set. You do not want to use this deployment unless in the `deployment-panel` NixOS test."
else
env
)
)

View file

@@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-panel = import ./deployment/check/panel/deployment.nix {
inherit inputs sources lib;
};
}
);
}

View file

@@ -1,377 +0,0 @@
{
inputs,
lib,
hostPkgs,
config,
...
}:
let
inherit (lib)
getExe
;
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "dummy";
panelPort = 8000;
panelUser = "test";
panelEmail = "test@test.com";
panelPassword = "ouiprdaaa43"; # panel's manager complains if too close to username or email
fediUser = "test";
fediEmail = "test@test.com";
fediPassword = "testtest";
fediName = "Testy McTestface";
toPythonBool = b: if b then "True" else "False";
interactWithPanel =
{
baseUri,
enableMastodon,
enablePeertube,
enablePixelfed,
}:
hostPkgs.writers.writePython3Bin "interact-with-panel"
{
libraries = with hostPkgs.python3Packages; [ selenium ];
flakeIgnore = [
"E302" # expected 2 blank lines, found 0
"E303" # too many blank lines
"E305" # expected 2 blank lines after end of function or class
"E501" # line too long (> 79 characters)
"E731" # do not assign lambda expression, use a def
];
}
''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
print("Create and configure driver...")
options = Options()
options.add_argument("--headless")
options.binary_location = "${getExe hostPkgs.firefox-unwrapped}"
service = webdriver.FirefoxService(executable_path="${getExe hostPkgs.geckodriver}")
driver = webdriver.Firefox(options=options, service=service)
driver.set_window_size(1280, 960)
driver.implicitly_wait(360)
driver.command_executor.set_timeout(3600)
print("Open login page...")
driver.get("${baseUri}/login/")
print("Enter username...")
driver.find_element(By.XPATH, "//input[@name = 'username']").send_keys("${panelUser}")
print("Enter password...")
driver.find_element(By.XPATH, "//input[@name = 'password']").send_keys("${panelPassword}")
print("Click Login button...")
driver.find_element(By.XPATH, "//button[normalize-space() = 'Login']").click()
print("Open configuration page...")
driver.get("${baseUri}/configuration/")
# Helpers to actually set and not add or switch input values.
def input_set(elt, keys):
elt.clear()
elt.send_keys(keys)
def checkbox_set(elt, new_value):
if new_value != elt.is_selected():
elt.click()
print("Enable Fediversity...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'enable']"), True)
print("Fill in initialUser info...")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.username']"), "${fediUser}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.password']"), "${fediPassword}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.email']"), "${fediEmail}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.displayName']"), "${fediName}")
print("Enable services...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})
print("Start deployment...")
driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()
print("Wait for deployment status to show up...")
get_deployment_result = lambda d: d.find_element(By.XPATH, "//div[@id = 'deployment-result']//p")
WebDriverWait(driver, timeout=3660, poll_frequency=10).until(get_deployment_result)
deployment_result = get_deployment_result(driver).get_attribute('innerHTML')
print("Quit...")
driver.quit()
match deployment_result:
case 'Deployment Succeeded':
print("Deployment has succeeded; exiting normally")
exit(0)
case 'Deployment Failed':
print("Deployment has failed; exiting with return code `1`")
exit(1)
case _:
print(f"Unexpected deployment result: {deployment_result}; exiting with return code `2`")
exit(2)
'';
in
{
_class = "nixosTest";
name = "deployment-panel";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in CLI test)
../../default.nix
../../options.nix
../../../services/fediversity
];
## The panel's module sets `nixpkgs.overlays` which clashes with
## `pkgsReadOnly`. We disable it here.
node.pkgsReadOnly = false;
nodes.deployer =
{ pkgs, ... }:
{
imports = [
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;
domain = "deployer";
secrets = {
SECRET_KEY = dummyFile;
};
port = panelPort;
deployment = {
flake = "/run/fedipanel/flake";
name = "check-deployment-panel";
};
};
environment.systemPackages = [ pkgs.expect ];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy # a configuration checker for nginx
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not type
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
nodes.client =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
httpie
dnsutils # for `dig`
openssl
cacert
wget
python3
python3Packages.selenium
firefox-unwrapped
geckodriver
];
security.pki.certificateFiles = [
config.nodes.acme.test-support.acme.caCert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Conection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The test of presence of the services are very simple: we only
## check that there is a systemd service of the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
## TODO: We want a nicer way to control where the FediPanel consumes its
## flake, which can default to the store but could also be somewhere else if
## someone wanted to change the code of the flake.
##
with subtest("Give the panel access to the flake"):
deployer.succeed("mkdir /run/fedipanel /run/fedipanel/flake >&2")
deployer.succeed("cp -R . /run/fedipanel/flake >&2")
deployer.succeed("chown -R panel:panel /run/fedipanel >&2")
## TODO: I want a programmatic way to provide an SSH key to the panel (and
## therefore NixOps4). This should happen either in the Python code, but
## maybe it is fair that that one picks up on the user's key? It could
## also be in the Nix packaging.
##
with subtest("Set up the panel's SSH keys"):
deployer.succeed("mkdir /home/panel/.ssh >&2")
deployer.succeed("cp -R /root/.ssh/* /home/panel/.ssh >&2")
deployer.succeed("chown -R panel:panel /home/panel/.ssh >&2")
deployer.succeed("chmod 600 /home/panel/.ssh/* >&2")
## TODO: This is a hack to accept the root CA used by Pebble on the client
## machine. Pebble randomizes everything, so the only way to get it is to
## call the /roots/0 endpoint at runtime, leaving not much margin for a nice
## Nixy way of adding the certificate. There is no way around it as this is
## by design in Pebble, showing in fact that Pebble was not the appropriate
## tool for our use and that nixpkgs does not in fact provide an easy way to
## generate _usable_ certificates in NixOS tests. I suggest we merge this,
## and track the task to set it up in a cleaner way. I would tackle this in
## a subsequent PR, and hopefully even contribute this BetterWay(tm) to
## nixpkgs. — Niols
##
with subtest("Set up ACME root CA on client"):
client.succeed("""
cd /etc/ssl/certs
curl -o pebble-root-ca.pem https://acme.test:15000/roots/0
curl -o pebble-intermediate-ca.pem https://acme.test:15000/intermediates/0
{ cat ca-bundle.crt
cat pebble-root-ca.pem
cat pebble-intermediate-ca.pem
} > new-ca-bundle.crt
rm ca-bundle.crt ca-certificates.crt
mv new-ca-bundle.crt ca-bundle.crt
ln -s ca-bundle.crt ca-certificates.crt
""")
## TODO: I would hope for a more declarative way to add users. This should
## be handled by the Nix packaging of the FediPanel. — Niols
##
with subtest("Create panel user"):
deployer.succeed("""
expect -c '
spawn manage createsuperuser --username ${panelUser} --email ${panelEmail}
expect "Password: "; send "${panelPassword}\\n";
expect "Password (again): "; send "${panelPassword}\\n"
interact
' >&2
""")
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = false;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = true;
enablePeertube = false;
enablePixelfed = true;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = true;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

View file

@@ -1,36 +0,0 @@
{
runNixOSTest,
sources,
system,
}:
let
pkgs = import sources.nixpkgs-stable {
inherit system;
overlays = [ overlay ];
};
overlay = _: _: {
inherit
(import "${sources.proxmox-nixos}/pkgs" {
craneLib = pkgs.callPackage "${sources.crane}/lib" { };
# this seems picky about the version used
inherit pkgs;
# not so picky about version for our purposes
pkgs-unstable = pkgs;
})
proxmox-ve
pve-ha-manager
;
};
in
runNixOSTest {
node.specialArgs = {
inherit
sources
pkgs
;
};
imports = [
./proxmoxTest.nix
];
}

View file

@@ -0,0 +1,40 @@
{
perSystem =
{
system,
...
}:
let
sources = import ../../../npins;
pkgs = import sources.nixpkgs-stable {
inherit system;
overlays = [ overlay ];
};
overlay = _: _: {
inherit
(import "${sources.proxmox-nixos}/pkgs" {
craneLib = pkgs.callPackage "${sources.crane}/lib" { };
# this seems picky about the version used
inherit pkgs;
# not so picky about version for our purposes
pkgs-unstable = pkgs;
})
proxmox-ve
pve-ha-manager
;
};
in
{
checks.proxmox-basic = pkgs.testers.runNixOSTest {
node.specialArgs = {
inherit
sources
pkgs
;
};
imports = [
./proxmoxTest.nix
];
};
};
}

View file

@@ -1,70 +0,0 @@
let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit inputs;
};
modules = [
module
./data-model.nix
];
}).config;
in
{
_class = "nix-unit";
test-eval = {
expr =
let
fediversity = eval (
{ config, ... }:
{
config = {
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options = {
enable = lib.mkEnableOption "Hello in the shell";
};
};
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
};
};
options = {
example-configuration = mkOption {
type = config.configuration;
readOnly = true;
default = {
enable = true;
applications.hello.enable = true;
};
};
};
}
);
in
{
inherit (fediversity)
example-configuration
;
};
expected = {
example-configuration = {
enable = true;
applications.hello.enable = true;
};
};
};
}

View file

@@ -1,89 +0,0 @@
{
lib,
config,
...
}:
let
inherit (lib) mkOption types;
inherit (lib.types)
attrsOf
attrTag
deferredModuleWith
submodule
optionType
functionTo
;
functionType = import ./function.nix;
application-resources = {
options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
);
};
};
in
{
_class = "nixops4Deployment";
options = {
applications = mkOption {
description = "Collection of Fediversity applications";
type = attrsOf (
submodule (application: {
_class = "fediversity-application";
options = {
description = mkOption {
description = "Description to be shown in the application overview";
type = types.str;
};
module = mkOption {
description = "Operator-facing configuration options for the application";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-application-config"; } ]; };
};
implementation = mkOption {
description = "Mapping of application configuration to deployment resources, a description of what an application needs to run";
type = application.config.config-mapping.function-type;
};
resources = mkOption {
description = "Compute resources required by an application";
type = functionTo application.config.config-mapping.output-type;
readOnly = true;
default = input: (application.config.implementation input).output;
};
config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType;
readOnly = true;
default = {
input-type = application.config.module;
output-type = application-resources;
};
};
};
})
);
};
configuration = mkOption {
description = "Configuration type declaring options to be set by operators";
type = optionType;
readOnly = true;
default = submodule {
options = {
enable = lib.mkEnableOption {
description = "your Fediversity configuration";
};
applications = lib.mapAttrs (
_name: application:
mkOption {
description = application.description;
type = submodule application.module;
default = { };
}
) config.applications;
};
};
};
};
}

View file

@@ -33,29 +33,11 @@
 ## information coming from the FediPanel.
 ##
 ## FIXME: lock step the interface with the definitions in the FediPanel
-panelConfigNullable:
+panelConfig:

 let
   inherit (lib) mkIf;
-
-  ## The convertor from module options to JSON schema does not generate proper
-  ## JSON schema types, forcing us to use nullable fields for default values.
-  ## However, working with those fields in the deployment code is annoying (and
-  ## unusual for Nix programmers), so we sanitize the input here and add back
-  ## the default value by hand.
-  nonNull = x: v: if x == null then v else x;
-  panelConfig = {
-    domain = nonNull panelConfigNullable.domain "fediversity.net";
-    initialUser = nonNull panelConfigNullable.initialUser {
-      displayName = "Testy McTestface";
-      username = "test";
-      password = "testtest";
-      email = "test@test.com";
-    };
-    mastodon = nonNull panelConfigNullable.mastodon { enable = false; };
-    peertube = nonNull panelConfigNullable.peertube { enable = false; };
-    pixelfed = nonNull panelConfigNullable.pixelfed { enable = false; };
-  };
 in

 ## Regular arguments of a NixOps4 deployment module.
@@ -65,8 +47,6 @@ let
   cfg = config.deployment;
 in
 {
-  _class = "nixops4Deployment";
-
   options = {
     deployment = lib.mkOption {
       description = ''
@@ -142,7 +122,7 @@ in
         { pkgs, ... }:
         mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable) {
           fediversity = {
-            inherit (cfg) domain;
+            inherit (panelConfig) domain;
             garage.enable = true;
             pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
             mastodon = mastodonS3KeyConfig { inherit pkgs; };

View file

@@ -1,31 +1,7 @@
-{ inputs, sources, ... }:
 {
-  _class = "flake";
-
-  perSystem =
-    { pkgs, system, ... }:
-    {
-      checks = {
-        proxmox-basic = import ./check/proxmox {
-          inherit (pkgs.testers) runNixOSTest;
-          inherit sources system;
-        };
-        deployment-basic = import ./check/basic {
-          inherit (pkgs.testers) runNixOSTest;
-          inherit inputs sources;
-        };
-        deployment-cli = import ./check/cli {
-          inherit (pkgs.testers) runNixOSTest;
-          inherit inputs sources;
-        };
-        deployment-panel = import ./check/panel {
-          inherit (pkgs.testers) runNixOSTest;
-          inherit inputs sources;
-        };
-      };
-    };
+  imports = [
+    ./check/basic/flake-part.nix
+    ./check/proxmox/flake-part.nix
+    ./check/cli/flake-part.nix
+  ];
 }

View file

@@ -1,37 +0,0 @@
/**
Modular function type
*/
{ config, lib, ... }:
let
inherit (lib) mkOption types;
inherit (types)
deferredModule
submodule
functionTo
optionType
;
in
{
options = {
input-type = mkOption {
type = deferredModule;
};
output-type = mkOption {
type = deferredModule;
};
function-type = mkOption {
type = optionType;
readOnly = true;
default = functionTo (submodule {
options = {
input = mkOption {
type = submodule config.input-type;
};
output = mkOption {
type = submodule config.output-type;
};
};
});
};
};
}

View file

@@ -17,8 +17,6 @@ let
   inherit (lib) types mkOption;
 in
 {
-  _class = "nixops4Deployment";
-
   options = {
     enable = lib.mkEnableOption "Fediversity configuration";
     domain = mkOption {

flake.lock (generated)
View file

@@ -59,6 +59,22 @@
       }
     },
     "flake-compat_2": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1696426674,
+        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
+    "flake-compat_3": {
       "flake": false,
       "locked": {
         "lastModified": 1733328505,
@@ -74,7 +90,7 @@
         "type": "github"
       }
     },
-    "flake-compat_3": {
+    "flake-compat_4": {
       "flake": false,
       "locked": {
         "lastModified": 1696426674,
@@ -127,6 +143,24 @@
       }
     },
     "flake-parts_3": {
+      "inputs": {
+        "nixpkgs-lib": "nixpkgs-lib_3"
+      },
+      "locked": {
+        "lastModified": 1738453229,
+        "narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "type": "github"
+      }
+    },
+    "flake-parts_4": {
       "inputs": {
         "nixpkgs-lib": [
           "nixops4-nixos",
@@ -167,12 +201,32 @@
         "type": "github"
       }
     },
-    "git-hooks-nix": {
+    "git-hooks": {
       "inputs": {
         "flake-compat": "flake-compat",
         "gitignore": "gitignore",
         "nixpkgs": "nixpkgs"
       },
+      "locked": {
+        "lastModified": 1742649964,
+        "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
+        "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "git-hooks.nix",
+        "type": "github"
+      }
+    },
+    "git-hooks-nix": {
+      "inputs": {
+        "flake-compat": "flake-compat_2",
+        "gitignore": "gitignore_2",
+        "nixpkgs": "nixpkgs_2"
+      },
       "locked": {
         "lastModified": 1737465171,
         "narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",
@@ -227,6 +281,27 @@
       }
     },
     "gitignore": {
+      "inputs": {
+        "nixpkgs": [
+          "git-hooks",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1709087332,
+        "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "gitignore.nix",
+        "type": "github"
+      }
+    },
+    "gitignore_2": {
       "inputs": {
         "nixpkgs": [
           "nixops4-nixos",
@@ -266,8 +341,8 @@
     },
     "nix": {
       "inputs": {
-        "flake-compat": "flake-compat_2",
-        "flake-parts": "flake-parts_3",
+        "flake-compat": "flake-compat_3",
+        "flake-parts": "flake-parts_4",
         "git-hooks-nix": "git-hooks-nix_2",
         "nixfmt": "nixfmt",
         "nixpkgs": [
@@ -341,10 +416,10 @@
     },
     "nixops4": {
       "inputs": {
-        "flake-parts": "flake-parts_2",
+        "flake-parts": "flake-parts_3",
         "nix": "nix",
         "nix-cargo-integration": "nix-cargo-integration",
-        "nixpkgs": "nixpkgs_2",
+        "nixpkgs": "nixpkgs_3",
         "nixpkgs-old": "nixpkgs-old"
       },
       "locked": {
@@ -363,7 +438,7 @@
     },
     "nixops4-nixos": {
       "inputs": {
-        "flake-parts": "flake-parts",
+        "flake-parts": "flake-parts_2",
         "git-hooks-nix": "git-hooks-nix",
         "nixops4": "nixops4",
         "nixops4-nixos": [
@@ -445,6 +520,18 @@
         "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
       }
     },
+    "nixpkgs-lib_3": {
+      "locked": {
+        "lastModified": 1738452942,
+        "narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
+      },
+      "original": {
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
+      }
+    },
     "nixpkgs-old": {
       "locked": {
         "lastModified": 1735563628,
@@ -478,6 +565,22 @@
       }
     },
     "nixpkgs_2": {
+      "locked": {
+        "lastModified": 1730768919,
+        "narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixpkgs-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_3": {
       "locked": {
         "lastModified": 1738410390,
         "narHash": "sha256-xvTo0Aw0+veek7hvEVLzErmJyQkEcRk6PSR4zsRQFEc=",
@@ -493,6 +596,22 @@
         "type": "github"
       }
     },
+    "nixpkgs_4": {
+      "locked": {
+        "lastModified": 1740463929,
+        "narHash": "sha256-4Xhu/3aUdCKeLfdteEHMegx5ooKQvwPHNkOgNCXQrvc=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "5d7db4668d7a0c6cc5fc8cf6ef33b008b2b1ed8b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-24.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "parts": {
       "inputs": {
         "nixpkgs-lib": [
@@ -518,7 +637,7 @@
     },
     "purescript-overlay": {
       "inputs": {
-        "flake-compat": "flake-compat_3",
+        "flake-compat": "flake-compat_4",
         "nixpkgs": [
           "nixops4-nixos",
           "nixops4",
@@ -561,11 +680,14 @@
     },
     "root": {
       "inputs": {
+        "flake-parts": "flake-parts",
+        "git-hooks": "git-hooks",
         "nixops4": [
           "nixops4-nixos",
          "nixops4"
        ],
-        "nixops4-nixos": "nixops4-nixos"
+        "nixops4-nixos": "nixops4-nixos",
+        "nixpkgs": "nixpkgs_4"
       }
     },
     "rust-overlay": {

View file

@ -1,36 +1,42 @@
{ {
inputs = { inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11"; # consumed by flake-parts
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
nixops4.follows = "nixops4-nixos/nixops4"; nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos"; nixops4-nixos.url = "github:nixops4/nixops4-nixos";
}; };
outputs = outputs =
inputs: inputs@{ flake-parts, ... }:
import ./mkFlake.nix inputs ( let
{ inputs, sources, ... }: sources = import ./npins;
{ inherit (sources) git-hooks agenix;
in
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [ imports = [
"${sources.git-hooks}/flake-module.nix" (import "${git-hooks}/flake-module.nix")
inputs.nixops4.modules.flake.default inputs.nixops4.modules.flake.default
./deployment/flake-part.nix ./deployment/flake-part.nix
./infra/flake-part.nix ./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
]; ];
perSystem = perSystem =
{ {
pkgs, pkgs,
lib, lib,
system, inputs',
... ...
}: }:
{ {
checks = {
panel = (import ./. { inherit sources system; }).tests.panel.basic;
};
formatter = pkgs.nixfmt-rfc-style; formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks = pre-commit.settings.hooks =
@ -46,7 +52,21 @@
trim-trailing-whitespace.enable = true; trim-trailing-whitespace.enable = true;
shellcheck.enable = true; shellcheck.enable = true;
}; };
devShells.default = pkgs.mkShell {
packages = [
pkgs.npins
pkgs.nil
(pkgs.callPackage "${agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
# exposing this env var as a hack to pass info in from the form
(inputs'.nixops4.packages.default.overrideAttrs {
impureEnvVars = [ "DEPLOYMENT" ];
})
];
};
};
}; };
} }
);
}
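The `devShells.default` in the right-hand version bundles the deployment tooling (`npins`, `nil`, agenix, `openssh`, `httpie`, `jq`, NixOps4); the `impureEnvVars` override on the NixOps4 package is the hack, noted in the comment, for letting `DEPLOYMENT` through to its environment. A usage sketch, assuming flakes are enabled and a deployment named `test` exists (the name is an assumption):

```
nix develop
DEPLOYMENT=test nixops4 apply default
```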

View file

@ -1,19 +1,20 @@
# Infra # Infra
This directory contains the definition of [the VMs](../machines/machines.md) that host our This directory contains the definition of [the VMs](machines.md) that host our
infrastructure. infrastructure.
## Provisioning VMs with an initial configuration ## Provisioning VMs with an initial configuration
> NOTE[Niols]: This is still very manual and clunky. Two things will happen: NOTE[Niols]: This is very manual and clunky. Two things will happen.
> 1. In the near future, I will improve the provisioning script to make this a bit less clunky. In the near future, I will improve the provisioning script to make this a bit less clunky.
> 2. In the far future, NixOps4 will be able to communicate with Proxmox directly and everything will become much cleaner. In the far future, NixOps4 will be able to communicate with Proxmox directly and everything will become much cleaner.
1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX` 1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
above 100. For instance, `fedi117`. above 100. For instance, `fedi117`.
2. Add a basic configuration for the machine. These typically go in 2. Add a basic configuration for the machine. These typically go in
`machines/dev/<name>/default.nix`. You can look at other `fediXXX` VMs to `infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
find inspiration. You probably do not need a `nixos.module` option at this find inspiration. You probably do not need a `nixos.module` option at this
point. point.
@ -24,7 +25,8 @@ infrastructure.
Those files need to exist during provisioning, but their content matters only Those files need to exist during provisioning, but their content matters only
when updating the machines' configuration. when updating the machines' configuration.
> FIXME: Remove this step by making the provisioning script not fail when the public key does not exist yet. FIXME: Remove this step by making the provisioning script not fail when the public key does not exist yet.
3. Run the provisioning script: 3. Run the provisioning script:
``` ```
@ -42,11 +44,11 @@ infrastructure.
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
``` ```
> FIXME: Make the provisioning script do that for us. FIXME: Make the provisioning script do that for us.
7. Regenerate the list of machines: 7. Regenerate the list of machines:
``` ```
sh machines/machines.md.sh sh infra/machines.md.sh
``` ```
Commit it with the machine's configuration, public key, etc. Commit it with the machine's configuration, public key, etc.
@ -54,7 +56,7 @@ infrastructure.
just enough for it to boot and be reachable. Go on to the next section to just enough for it to boot and be reachable. Go on to the next section to
update the machine and put an actual configuration. update the machine and put an actual configuration.
> FIXME: Figure out why the full configuration isn't on the machine at this FIXME: Figure out why the full configuration isn't on the machine at this
point and fix it. point and fix it.
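Condensed, the provisioning flow above amounts to something like the following sketch (the script name `proxmox-provision.sh` is an assumption based on the repository's `infra/proxmox-*.sh` scripts; the other commands appear verbatim in the steps above):

```
sh infra/proxmox-provision.sh fedi117
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
sh infra/machines.md.sh
```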
## Updating existing VM configurations ## Updating existing VM configurations

View file

@ -5,9 +5,8 @@ let
in in
{ {
_class = "nixos";
imports = [ imports = [
./hardware.nix
./networking.nix ./networking.nix
./users.nix ./users.nix
]; ];
@ -23,9 +22,4 @@ in
nix.extraOptions = '' nix.extraOptions = ''
experimental-features = nix-command flakes experimental-features = nix-command flakes
''; '';
boot.loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
} }

View file

@ -1,20 +1,20 @@
{ ... }: { modulesPath, ... }:
{ {
_class = "nixos"; imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
## FIXME: It would be nice, but the following leads to infinite recursion
## in the way we currently plug `sources` in.
##
# imports = [
# "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# ];
boot = { boot = {
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
initrd = { initrd = {
availableKernelModules = [ availableKernelModules = [
"ata_piix" "ata_piix"
"uhci_hcd" "uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod" "sd_mod"
"sr_mod" "sr_mod"
]; ];

View file

@ -1,64 +1,63 @@
{ config, lib, ... }: { config, lib, ... }:
let let
inherit (lib) mkDefault mkIf mkMerge; inherit (lib) mkDefault;
in in
{ {
_class = "nixos";
config = { config = {
services.openssh = { services.openssh = {
enable = true; enable = true;
settings.PasswordAuthentication = false; settings.PasswordAuthentication = false;
}; };
networking = mkMerge [ networking = {
{
hostName = config.fediversityVm.name; hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain; domain = config.fediversityVm.domain;
## REVIEW: Do we actually need that, considering that we have static IPs? ## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true; useDHCP = mkDefault true;
## Disable the default firewall and use nftables instead, with a custom interfaces = {
## Procolix-made ruleset. eth0 = {
ipv4 = {
addresses = [
{
inherit (config.fediversityVm.ipv4) address prefixLength;
}
];
};
ipv6 = {
addresses = [
{
inherit (config.fediversityVm.ipv6) address prefixLength;
}
];
};
};
};
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = "eth0";
};
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = "eth0";
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
firewall.enable = false; firewall.enable = false;
nftables = { nftables = {
enable = true; enable = true;
rulesetFile = ./nftables-ruleset.nft; rulesetFile = ./nftables-ruleset.nft;
}; };
}
## IPv4
(mkIf config.fediversityVm.ipv4.enable {
interfaces.${config.fediversityVm.ipv4.interface}.ipv4.addresses = [
{ inherit (config.fediversityVm.ipv4) address prefixLength; }
];
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = config.fediversityVm.ipv4.interface;
}; };
nameservers = [
"95.215.185.6"
"95.215.185.7"
];
})
## IPv6
(mkIf config.fediversityVm.ipv6.enable {
interfaces.${config.fediversityVm.ipv6.interface}.ipv6.addresses = [
{ inherit (config.fediversityVm.ipv6) address prefixLength; }
];
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = config.fediversityVm.ipv6.interface;
};
nameservers = [
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
})
];
}; };
} }
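The `mkMerge`/`mkIf` variant above gates the per-family settings behind `enable` flags instead of hard-coding `eth0`. A minimal sketch of that pattern, assuming `enable`, `interface`, and `gateway` options per address family as in the diff:

```
{ config, lib, ... }:
{
  networking = lib.mkMerge [
    # unconditional settings
    { hostName = config.fediversityVm.name; }
    # applied only when IPv4 is enabled
    (lib.mkIf config.fediversityVm.ipv4.enable {
      defaultGateway = {
        address = config.fediversityVm.ipv4.gateway;
        interface = config.fediversityVm.ipv4.interface;
      };
    })
  ];
}
```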

View file

@ -1,13 +1,5 @@
{ {
config,
...
}:
{
_class = "nixos";
users.users = { users.users = {
root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = { procolix = {
isNormalUser = true; isNormalUser = true;
extraGroups = [ "wheel" ]; extraGroups = [ "wheel" ];

View file

@ -6,8 +6,6 @@ let
in in
{ {
# `config` not set and imported from multiple places: no fixed module class
options.fediversityVm = { options.fediversityVm = {
########################################################################## ##########################################################################
@ -20,13 +18,16 @@ in
''; '';
}; };
isFediversityVm = mkOption { proxmox = mkOption {
type = types.bool; type = types.nullOr (
types.enum [
"procolix"
"fediversity"
]
);
description = '' description = ''
Whether the machine is a Fediversity VM or not. This is used to The Proxmox instance. This is used for provisioning only and should be
determine whether the machine should be provisioned via Proxmox or not. set to `null` if the machine is not a VM.
Machines that are _not_ Fediversity VMs could be physical machines, or
VMs that live outside Fediversity, eg. on Procolix's Proxmox.
''; '';
}; };
@ -88,17 +89,6 @@ in
}; };
ipv4 = { ipv4 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv4 network.
'';
default = "eth0";
};
address = mkOption { address = mkOption {
description = '' description = ''
The IP address of the machine, version 4. It will be injected as a The IP address of the machine, version 4. It will be injected as a
@ -124,17 +114,6 @@ in
}; };
ipv6 = { ipv6 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv6 network.
'';
default = "eth0";
};
address = mkOption { address = mkOption {
description = '' description = ''
The IP address of the machine, version 6. It will be injected as a The IP address of the machine, version 6. It will be injected as a

View file

@ -1,9 +1,6 @@
{ {
inputs,
lib, lib,
config, config,
keys,
secrets,
... ...
}: }:
@ -11,11 +8,15 @@ let
inherit (lib) attrValues elem mkDefault; inherit (lib) attrValues elem mkDefault;
inherit (lib.attrsets) concatMapAttrs optionalAttrs; inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix; inherit (lib.strings) removeSuffix;
sources = import ../../npins;
inherit (sources) nixpkgs agenix disko;
secretsPrefix = ../../secrets;
secrets = import (secretsPrefix + "/secrets.nix");
keys = import ../../keys;
in in
{ {
_class = "nixops4Resource";
imports = [ ./options.nix ]; imports = [ ./options.nix ];
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name}; fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
@ -25,39 +26,38 @@ in
hostPublicKey = config.fediversityVm.hostPublicKey; hostPublicKey = config.fediversityVm.hostPublicKey;
}; };
inherit (inputs) nixpkgs; inherit nixpkgs;
## The configuration of the machine. We strive to keep in this file only the ## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else ## options that really need to be injected from the resource. Everything else
## should go into the `./nixos` subdirectory. ## should go into the `./nixos` subdirectory.
nixos.module = { nixos.module = {
imports = [ imports = [
(import "${agenix}/modules/age.nix")
(import "${disko}/module.nix")
./options.nix ./options.nix
./nixos ./nixos
./proxmox-qemu-vm.nix
]; ];
## Inject the shared options from the resource's `config` into the NixOS ## Inject the shared options from the resource's `config` into the NixOS
## configuration. ## configuration.
fediversityVm = config.fediversityVm; fediversityVm = config.fediversityVm;
## Read all the secrets, filter the ones that are supposed to be readable with ## Read all the secrets, filter the ones that are supposed to be readable
## public key, and create a mapping from `<name>.file` to the absolute path of ## with this host's public key, and add them correctly to the configuration
## the secret's file. ## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs ( age.secrets = concatMapAttrs (
name: secret: name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) { optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
${removeSuffix ".age" name}.file = secrets.rootPath + "/${name}"; ${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
} })
) secrets.mapping; ) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider ## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo. ## supports users with password-less sudo.
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [ users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
# allow our panel vm access to the test machines # allow our panel vm access to the test machines
keys.panel keys.panel
# allow continuous deployment access
keys.cd
]; ];
}; };
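The secret-mapping comment above describes a filter over the agenix secrets. A toy sketch of that logic with illustrative values (not the repository's real key or secrets):

```
let
  lib = import <nixpkgs/lib>;
  inherit (lib) concatMapAttrs elem optionalAttrs removeSuffix;
  # Illustrative values only.
  hostPublicKey = "ssh-ed25519 AAAA...example";
  secrets = {
    "forgejo-runner-token.age" = { publicKeys = [ hostPublicKey ]; };
    "other-host-secret.age" = { publicKeys = [ ]; };
  };
in
concatMapAttrs (
  name: secret:
  # Keep only the secrets readable with this host's public key.
  optionalAttrs (elem hostPublicKey secret.publicKeys) {
    ${removeSuffix ".age" name}.file = ./secrets + "/${name}";
  }
) secrets
# => { forgejo-runner-token = { file = ./secrets/forgejo-runner-token.age; }; }
```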

View file

@ -1,9 +1,6 @@
{ {
inputs, inputs,
lib, lib,
sources,
keys,
secrets,
... ...
}: }:
@ -14,38 +11,37 @@ let
mkOption mkOption
evalModules evalModules
filterAttrs filterAttrs
mapAttrs'
deepSeq
; ;
inherit (lib.attrsets) genAttrs; inherit (lib.attrsets) genAttrs;
sources = import ../../npins;
commonResourceModule = { ## Given a machine's name and whether it is a test VM, make a resource module,
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch ## except for its missing provider. (Depending on the use of that resource, we
# flake-parts and have our own data model for how the project is organised ## will provide a different one.)
# internally makeResourceModule =
_module.args = { { vmName, isTestVm }:
inherit {
inputs imports =
keys [
secrets
sources
;
};
## FIXME: It would be preferable to have those `sources`-related imports in
## the modules that use them. However, doing so triggers infinite recursions
## because of the way we propagate `sources`. `sources` must be propagated by
## means of `specialArgs`, but this requires a bigger change.
nixos.module.imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
"${sources.home-manager}/nixos"
];
imports = [
./common/resource.nix ./common/resource.nix
]
++ (
if isTestVm then
[
./test-machines/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
# allow our panel vm access to the test machines
(import ../keys).panel
]; ];
}
]
else
[
./machines/${vmName}
]
);
fediversityVm.name = vmName;
}; };
## Given a list of machine names, make a deployment with those machines' ## Given a list of machine names, make a deployment with those machines'
@ -59,8 +55,10 @@ let
type = providers.local.exec; type = providers.local.exec;
imports = [ imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos inputs.nixops4-nixos.modules.nixops4Resource.nixos
commonResourceModule (makeResourceModule {
../machines/dev/${vmName} inherit vmName;
isTestVm = false;
})
]; ];
}); });
}; };
@ -77,29 +75,21 @@ let
fediversity = import ../services/fediversity; fediversity = import ../services/fediversity;
} }
{ {
garageConfigurationResource = { garageConfigurationResource = makeResourceModule {
imports = [ vmName = "test01";
commonResourceModule isTestVm = true;
../machines/operator/test01
];
}; };
mastodonConfigurationResource = { mastodonConfigurationResource = makeResourceModule {
imports = [ vmName = "test06"; # somehow `test02` has a problem - use test06 instead
commonResourceModule isTestVm = true;
../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
];
}; };
peertubeConfigurationResource = { peertubeConfigurationResource = makeResourceModule {
imports = [ vmName = "test05";
commonResourceModule isTestVm = true;
../machines/operator/test05
];
}; };
pixelfedConfigurationResource = { pixelfedConfigurationResource = makeResourceModule {
imports = [ vmName = "test04";
commonResourceModule isTestVm = true;
../machines/operator/test04
];
}; };
}; };
@ -112,67 +102,56 @@ let
## this is only needed to expose NixOS configurations for provisioning ## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4. ## purposes, and eventually all of this should be handled by NixOps4.
options = { options = {
nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { }; nixpkgs = mkOption { };
ssh = mkOption { }; ssh = mkOption { };
}; };
}; };
makeResourceConfig = makeResourceConfig =
{ vmName, isTestVm }: vm:
(evalModules { (evalModules {
modules = [ modules = [
nixops4ResourceNixosMockOptions nixops4ResourceNixosMockOptions
commonResourceModule (makeResourceModule vm)
(if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
]; ];
}).config; }).config;
## Given a VM name, make a NixOS configuration for this machine. ## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration = makeConfiguration =
isTestVm: vmName: isTestVm: vmName:
import "${sources.nixpkgs}/nixos" { let
configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module; inherit (sources) nixpkgs;
system = "x86_64-linux"; in
import "${nixpkgs}/nixos" {
modules = [
(makeResourceConfig { inherit vmName isTestVm; }).nixos.module
];
}; };
makeVmOptions = makeVmOptions = isTestVm: vmName: {
isTestVm: vmName: inherit ((makeResourceConfig { inherit vmName isTestVm; }).fediversityVm)
let proxmox
config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
in
if config.isFediversityVm then
{
inherit (config)
vmId vmId
description description
sockets sockets
cores cores
memory memory
diskSize diskSize
hostPublicKey hostPublicKey
unsafeHostPrivateKey unsafeHostPrivateKey
; ;
} };
else
null;
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path)); listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev; machines = listSubdirectories ./machines;
testMachines = listSubdirectories ../machines/operator; testMachines = listSubdirectories ./test-machines;
nixosConfigurations =
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true);
vmOptions =
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
(genAttrs machines (makeVmOptions false) // genAttrs testMachines (makeVmOptions true));
in in
{ {
_class = "flake";
## - Each normal or test machine gets a NixOS configuration. ## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry. ## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment. ## - Each normal machine gets a deployment.
@ -192,23 +171,10 @@ in
) )
); );
}; };
flake = { inherit nixosConfigurations vmOptions; }; flake.nixosConfigurations =
genAttrs machines (makeConfiguration false)
perSystem = // genAttrs testMachines (makeConfiguration true);
{ pkgs, ... }: flake.vmOptions =
{ genAttrs machines (makeVmOptions false)
checks = // genAttrs testMachines (makeVmOptions true);
mapAttrs' (name: nixosConfiguration: {
name = "nixosConfigurations-${name}";
value = nixosConfiguration.config.system.build.toplevel;
}) nixosConfigurations
// mapAttrs' (name: vmOptions: {
name = "vmOptions-${name}";
## Check that VM options build/evaluate correctly. `deepSeq e1
## e2` evaluates `e1` strictly in depth before returning `e2`. We
## use this trick because checks need to be derivations, which VM
## options are not.
value = deepSeq vmOptions pkgs.hello;
}) vmOptions;
};
} }
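Since `vmOptions` values are plain attribute sets rather than derivations, the `deepSeq vmOptions pkgs.hello` trick above is what turns their evaluation into a buildable check. Exercising the generated outputs for one machine might look like this sketch (machine name `fedi200` taken from the machine table):

```
nix build .#checks.x86_64-linux.vmOptions-fedi200
nix build .#checks.x86_64-linux.nixosConfigurations-fedi200
```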

View file

@ -7,10 +7,9 @@ Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description Machine | Proxmox | Description
--------|---------|------------- --------|---------|-------------
[`fedi200`](./dev/fedi200) | fediversity | Testing machine for Hans [`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./dev/fedi201) | fediversity | FediPanel [`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./dev/vm02116) | procolix | Forgejo [`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./dev/vm02187) | procolix | Wiki [`vm02187`](./vm02187) | procolix | Wiki
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
This table excludes all machines with names starting with `test`. This table excludes all machines with names starting with `test`.

View file

@ -20,7 +20,7 @@ vmOptions=$(
cd .. cd ..
nix eval \ nix eval \
--impure --raw --expr " --impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ../.)).vmOptions builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
" \ " \
--log-format raw --quiet --log-format raw --quiet
) )
@ -32,12 +32,11 @@ for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1) description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)
# shellcheck disable=SC2016 # shellcheck disable=SC2016
printf '[`%s`](./dev/%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description" printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
fi fi
done done
cat <<\EOF cat <<\EOF
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
This table excludes all machines with names starting with `test`. This table excludes all machines with names starting with `test`.
EOF EOF

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "fedi200";
isFediversityVm = true;
vmId = 200; vmId = 200;
proxmox = "fediversity";
description = "Testing machine for Hans"; description = "Testing machine for Hans";
domain = "abundos.eu"; domain = "abundos.eu";

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "fedi201";
isFediversityVm = true;
vmId = 201; vmId = 201;
proxmox = "fediversity";
description = "FediPanel"; description = "FediPanel";
domain = "abundos.eu"; domain = "abundos.eu";

View file

@ -6,8 +6,6 @@ let
name = "panel"; name = "panel";
in in
{ {
_class = "nixos";
imports = [ imports = [
(import ../../../panel { }).module (import ../../../panel { }).module
]; ];

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "vm02116";
isFediversityVm = false;
vmId = 2116; vmId = 2116;
proxmox = "procolix";
description = "Forgejo"; description = "Forgejo";
ipv4.address = "185.206.232.34"; ipv4.address = "185.206.232.34";

View file

@ -5,8 +5,6 @@ let
in in
{ {
_class = "nixos";
services.forgejo = { services.forgejo = {
enable = true; enable = true;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "vm02187";
isFediversityVm = false;
vmId = 2187; vmId = 2187;
proxmox = "procolix";
description = "Wiki"; description = "Wiki";
ipv4.address = "185.206.232.187"; ipv4.address = "185.206.232.187";

View file

@ -1,8 +1,6 @@
{ config, ... }: { config, ... }:
{ {
_class = "nixos";
services.phpfpm.pools.mediawiki.phpOptions = '' services.phpfpm.pools.mediawiki.phpOptions = ''
upload_max_filesize = 1024M; upload_max_filesize = 1024M;
post_max_size = 1024M; post_max_size = 1024M;

View file

@ -15,6 +15,7 @@ let
installer = installer =
{ {
config,
pkgs, pkgs,
lib, lib,
... ...

View file

@ -179,9 +179,15 @@ grab_vm_options () {
--log-format raw --quiet --log-format raw --quiet
) )
proxmox=$(echo "$options" | jq -r .proxmox)
vm_id=$(echo "$options" | jq -r .vmId) vm_id=$(echo "$options" | jq -r .vmId)
description=$(echo "$options" | jq -r .description) description=$(echo "$options" | jq -r .description)
if [ "$proxmox" != fediversity ]; then
die "I do not know how to provision things that are not Fediversity VMs,
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
fi
sockets=$(echo "$options" | jq -r .sockets) sockets=$(echo "$options" | jq -r .sockets)
cores=$(echo "$options" | jq -r .cores) cores=$(echo "$options" | jq -r .cores)
memory=$(echo "$options" | jq -r .memory) memory=$(echo "$options" | jq -r .memory)

View file

@ -167,10 +167,16 @@ grab_vm_options () {
--log-format raw --quiet --log-format raw --quiet
) )
proxmox=$(echo "$options" | jq -r .proxmox)
vm_id=$(echo "$options" | jq -r .vmId) vm_id=$(echo "$options" | jq -r .vmId)
printf 'done grabing VM options for VM %s. Got id: %d.\n' \ if [ "$proxmox" != fediversity ]; then
"$vm_name" "$vm_id" die "I do not know how to remove things that are not Fediversity VMs,
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
fi
printf 'done grabbing VM options for VM %s. Found VM %d on %s Proxmox.\n' \
"$vm_name" "$vm_id" "$proxmox"
fi fi
} }

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test01";
isFediversityVm = true;
vmId = 7001; vmId = 7001;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test02";
isFediversityVm = true;
vmId = 7002; vmId = 7002;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test03";
isFediversityVm = true;
vmId = 7003; vmId = 7003;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test04";
isFediversityVm = true;
vmId = 7004; vmId = 7004;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test05";
isFediversityVm = true;
vmId = 7005; vmId = 7005;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test06";
isFediversityVm = true;
vmId = 7006; vmId = 7006;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test11";
isFediversityVm = true;
vmId = 7011; vmId = 7011;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test12";
isFediversityVm = true;
vmId = 7012; vmId = 7012;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test13";
isFediversityVm = true;
vmId = 7013; vmId = 7013;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1,10 +1,7 @@
{ {
_class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test14";
isFediversityVm = true;
vmId = 7014; vmId = 7014;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlsYTtMx3hFO8B5B8iHaXL2JKj9izHeC+/AMhIWXBPs cd-age

View file

@ -35,5 +35,4 @@ in
contributors = collectKeys ./contributors; contributors = collectKeys ./contributors;
systems = collectKeys ./systems; systems = collectKeys ./systems;
panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub); panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub);
cd = removeTrailingWhitespace (readFile ./cd-ssh-key.pub);
} }

View file

@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.keys = import ./.;
}

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFXQW5fxJoNY9wtTMsNExgbAbvyljIRGBLjY+USh/0A

View file

@ -1,4 +0,0 @@
# Machines
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.

View file

@ -1,72 +0,0 @@
{ lib, ... }:
let
inherit (lib) mkDefault mkForce;
in
{
_class = "nixops4Resource";
# NOTE: This needs an SSH config entry `forgejo-ci` to locate and access the
# machine. This is because different people access the machine in different
# ways (eg. via a proxy vs. via Procolix's VPN). This might look like:
#
# Host forgejo-ci
# HostName 45.142.234.216
# HostKeyAlias forgejo-ci
#
# The `HostKeyAlias` statement is crucial. Without it, deployment will fail
# with the SSH error “Host key verification failed”.
ssh.host = mkForce "forgejo-ci";
fediversityVm = {
name = "forgejo-ci";
domain = "procolix.com";
isFediversityVm = false;
ipv4 = {
interface = "enp1s0f0";
address = "192.168.201.65";
prefixLength = 24;
gateway = "192.168.201.1";
};
ipv6.enable = false;
};
nixos.module =
{ config, ... }:
{
_class = "nixos";
imports = [
./forgejo-actions-runner.nix
];
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
networking = {
nftables.enable = mkForce false;
hostId = "1d6ea552";
};
## NOTE: This is a physical machine, so it is not covered by disko
fileSystems."/" = lib.mkForce {
device = "rpool/root";
fsType = "zfs";
};
fileSystems."/home" = {
device = "rpool/home";
fsType = "zfs";
};
fileSystems."/boot" = lib.mkForce {
device = "/dev/disk/by-uuid/50B2-DD3F";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
];
};
};
}
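For the proxied access mentioned in the comment above, the SSH config entry might instead look like this hypothetical variant (the jump host name is invented; `HostKeyAlias` stays mandatory either way):

```
Host forgejo-ci
  HostName 192.168.201.65
  ProxyJump user@jump.procolix.com
  HostKeyAlias forgejo-ci
```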

View file

@ -1,47 +0,0 @@
{ pkgs, config, ... }:
{
_class = "nixos";
services.gitea-actions-runner = {
package = pkgs.forgejo-actions-runner;
instances.default = {
enable = true;
name = config.networking.fqdn;
url = "https://git.fediversity.eu";
tokenFile = config.age.secrets.forgejo-runner-token.path;
settings = {
log.level = "info";
runner = {
file = ".runner";
# Take only 1 job at a time to avoid clashing NixOS tests, see #362
capacity = 1;
timeout = "3h";
insecure = false;
fetch_timeout = "5s";
fetch_interval = "2s";
};
};
## This runner supports Docker (with a default Ubuntu image) and native
## modes. In native mode, it contains a few default packages.
labels = [
"docker:docker://node:16-bullseye"
"native:host"
];
hostPackages = with pkgs; [
bash
git
nix
nodejs
];
};
};
## For the Docker mode of the runner.
virtualisation.docker.enable = true;
}
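The `tokenFile` above points at an agenix secret. A plausible way to (re)set it, assuming the agenix workflow used elsewhere in this repository (the registration token itself comes from the Forgejo admin UI):

```
cd secrets
agenix -e forgejo-runner-token.age   # paste the runner registration token
```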

View file

@ -1,54 +0,0 @@
## This file contains a tweak of flake-parts's `mkFlake` function to splice in
## sources taken from npins.
## NOTE: Much of the logic in this file feels like it should not be super
## specific to fediversity. Could it make sense to extract the core of this to
## another place closer to it in spirit, such as @fricklerhandwerk's
## flake-inputs (which this code already depends on anyway, and which already
## contained two distinct helpers for migrating away from flakes)? cf
## https://git.fediversity.eu/Fediversity/Fediversity/pulls/447#issuecomment-8671
inputs@{ self, ... }:
let
sources = import ./npins;
inherit (import sources.flake-inputs) import-flake;
# XXX(@fricklerhandwerk): this atrocity is required to splice in a foreign Nixpkgs via flake-parts
# XXX - this is just importing a flake
nixpkgs = import-flake { src = sources.nixpkgs; };
# XXX - this overrides the inputs attached to `self`
inputs' = self.inputs // {
nixpkgs = nixpkgs;
};
self' = self // {
inputs = inputs';
};
flake-parts-lib = import "${sources.flake-parts}/lib.nix" { inherit (nixpkgs) lib; };
in
flakeModule:
flake-parts-lib.mkFlake
{
# XXX - finally we override the overall set of `inputs` -- we need both:
# `flake-parts` obtains `nixpkgs` from `self.inputs` and not from `inputs`.
inputs = inputs // {
inherit nixpkgs;
};
self = self';
specialArgs = {
inherit sources;
};
}
{
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [ flakeModule ];
}
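For reference, the root `flake.nix` in this compare consumes this helper exactly through the following shape (taken from the flake.nix hunk above, imports abbreviated):

```
outputs =
  inputs:
  import ./mkFlake.nix inputs (
    { inputs, sources, ... }:
    {
      imports = [
        "${sources.git-hooks}/flake-module.nix"
        inputs.nixops4.modules.flake.default
        ./deployment/flake-part.nix
        ./infra/flake-part.nix
      ];
    }
  );
```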

View file

@ -58,20 +58,17 @@
"hash": "0wbx518d2x54yn4xh98cgm65wvj0gpy6nia6ra7ns4j63hx14fkq" "hash": "0wbx518d2x54yn4xh98cgm65wvj0gpy6nia6ra7ns4j63hx14fkq"
}, },
"flake-inputs": { "flake-inputs": {
"type": "GitRelease", "type": "Git",
"repository": { "repository": {
"type": "GitHub", "type": "GitHub",
"owner": "fricklerhandwerk", "owner": "fricklerhandwerk",
"repo": "flake-inputs" "repo": "flake-inputs"
}, },
"pre_releases": false, "branch": "main",
"version_upper_bound": null,
"release_prefix": null,
"submodules": false, "submodules": false,
"version": "4.1", "revision": "6461d0b56e790bf289af07c5e5261abbf4f536af",
"revision": "ad02792f7543754569fe2fd3d5787ee00ef40be2", "url": "https://github.com/fricklerhandwerk/flake-inputs/archive/6461d0b56e790bf289af07c5e5261abbf4f536af.tar.gz",
"url": "https://api.github.com/repos/fricklerhandwerk/flake-inputs/tarball/4.1", "hash": "03mwisvr1mc3nd33nvg4bvcyxjxpm4lwhwym39r0768cm1007ixl"
"hash": "1j57avx2mqjnhrsgq3xl7ih8v7bdhz1kj3min6364f486ys048bm"
}, },
"flake-parts": { "flake-parts": {
"type": "Git", "type": "Git",
@ -112,19 +109,6 @@
"url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz", "url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz",
"hash": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w" "hash": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w"
}, },
"home-manager": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "home-manager"
},
"branch": "master",
"submodules": false,
"revision": "863842639722dd12ae9e37ca83bcb61a63b36f6c",
"url": "https://github.com/nix-community/home-manager/archive/863842639722dd12ae9e37ca83bcb61a63b36f6c.tar.gz",
"hash": "0rw9n8d4v87pzlmw7ws15f0sldb51fd9528skpbzmrzl4pinsgij"
},
"htmx": { "htmx": {
"type": "GitRelease", "type": "GitRelease",
"repository": { "repository": {

2
panel/.gitignore vendored
View file

@ -11,5 +11,5 @@ db.sqlite3
src/db.sqlite3 src/db.sqlite3
src/static src/static
src/panel/static/htmx* src/panel/static/htmx*
src/panel/configuration/schema.* src/panel/configuration/schema.py
.credentials .credentials

View file

@ -20,15 +20,8 @@ in
packages = [ packages = [
pkgs.npins pkgs.npins
manage manage
# NixOps4 and its dependencies
pkgs.nixops4
pkgs.nix
pkgs.openssh
]; ];
env = { env = import ./env.nix { inherit lib pkgs; } // {
DEPLOYMENT_FLAKE = toString ../.;
DEPLOYMENT_NAME = "test";
NPINS_DIRECTORY = toString ../npins; NPINS_DIRECTORY = toString ../npins;
CREDENTIALS_DIRECTORY = toString ./.credentials; CREDENTIALS_DIRECTORY = toString ./.credentials;
DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3"; DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3";
@ -45,7 +38,7 @@ in
''; '';
}; };
module = ./nix/configuration.nix; module = import ./nix/configuration.nix;
tests = pkgs.callPackage ./nix/tests.nix { }; tests = pkgs.callPackage ./nix/tests.nix { };
# re-export inputs so they can be overridden granularly # re-export inputs so they can be overridden granularly

Some files were not shown because too many files have changed in this diff.