Compare commits

..

1 commit

Author SHA1 Message Date
ef345a7fcb WIP: sketch domain data model 2025-06-10 15:45:14 +02:00
139 changed files with 690 additions and 1777 deletions

View file

@ -1,24 +0,0 @@
name: deploy-infra
on:
workflow_dispatch: # allows manual triggering
push:
branches:
# - main
jobs:
deploy:
runs-on: native
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up SSH key to access age secrets
run: |
env
mkdir -p ~/.ssh
echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
- name: Deploy
run: nix-shell --run 'nixops4 apply default'

View file

@ -15,23 +15,17 @@ jobs:
- uses: actions/checkout@v4
- run: nix-build -A tests
check-data-model:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
check-peertube:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build services -A tests.peertube
- run: cd services && nix-build -A tests.peertube
check-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build panel -A tests
- run: cd panel && nix-build -A tests
check-deployment-basic:
runs-on: native
@ -44,9 +38,3 @@ jobs:
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-cli -L
check-deployment-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -L

View file

@ -2,23 +2,20 @@ name: update-dependencies
on:
workflow_dispatch: # allows manual triggering
# FIXME: re-enable when manual run works
# schedule:
# - cron: '0 0 1 * *' # monthly
schedule:
- cron: '0 0 1 * *' # monthly
jobs:
lockfile:
runs-on: native
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update pins
run: nix-shell --run "npins update"
- name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
- name: Install Nix
uses: cachix/install-nix-action@v31
- name: Install npins
run: nix profile install 'nixpkgs#npins'
- name: Update npins sources
uses: getchoo/update-npins@v0
with:
remote-instance-api-version: v1
token: "${{ secrets.DEPLOY_KEY }}"
branch: npins-update
commit-message: "npins: update sources"
title: "npins: update sources"

View file

@ -154,3 +154,6 @@ details as to what they are for. As an overview:
- [`services/`](./services) contains our effort to make Fediverse applications
work seamlessly together in our specific setting.
- [`website/`](./website) contains the framework and the content of [the
Fediversity website](https://fediversity.eu/)

View file

@ -10,8 +10,6 @@ let
gitignore
;
inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4;
pre-commit-check =
(import "${git-hooks}/nix" {
inherit nixpkgs system;
@ -43,30 +41,6 @@ in
shell = pkgs.mkShellNoCC {
inherit (pre-commit-check) shellHook;
buildInputs = pre-commit-check.enabledPackages;
packages =
let
test-loop = pkgs.writeShellApplication {
name = "test-loop";
runtimeInputs = [
pkgs.watchexec
pkgs.nix-unit
];
text = ''
watchexec -w ${builtins.toString ./.} -- nix-unit ${builtins.toString ./deployment/data-model-test.nix} "$@"
'';
};
in
[
pkgs.npins
pkgs.nil
(pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
pkgs.nix-unit
test-loop
nixops4.packages.${system}.default
];
};
tests = {

View file

@ -3,13 +3,6 @@
This directory contains work to generate a full Fediversity deployment from a minimal configuration.
This is different from [`../services/`](../services) that focuses on one machine, providing a polished and unified interface to different Fediverse services.
## Data model
The core piece of the project is the [Fediversity data model](./data-model.nix), which describes all entities and their interactions.
What can be done with it is exemplified in the [evaluation tests](./data-model-test.nix).
Run `test-loop` in the development environment when hacking on the data model or adding tests.
## Checks
There are three levels of deployment checks: `basic`, `cli`, `panel`.
@ -116,8 +109,8 @@ flowchart LR
target_machines -->|get certs| acme
```
### Service deployment check from the FediPanel
### [WIP] Service deployment check from the panel
This is a full deployment check running the [FediPanel](../panel) on the deployer machine, deploying some services through it and checking that they are indeed on the target machines, then cleaning them up and checking whether that works, too.
This is a full deployment check running the panel on the deployer machine, deploying some services through the panel and checking that they are indeed on the target machines, then cleaning them up and checking whether that works, too.
It builds upon the basic and CLI deployment checks, the only difference being that `deployer` runs NixOps4 only indirectly via the panel, and the `client` node is the one that triggers the deployment via a browser, the way a human would.
It builds upon the basic and CLI deployment checks.
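
For reference (not part of the diff), these checks are exposed as flake check attributes, so on an x86_64-linux machine they can be built roughly as follows. The `deployment-basic` invocation is inferred by analogy with the CLI and panel commands in the CI workflow earlier in this diff, and the panel check is only available where it is still wired into the flake.

```
nix build .#checks.x86_64-linux.deployment-basic -L
nix build .#checks.x86_64-linux.deployment-cli -L
nix build .#checks.x86_64-linux.deployment-panel -L
```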

View file

@ -0,0 +1,50 @@
{
lib,
...
}:
let
inherit (lib) types mkOption;
in
{
options = {
enable = lib.mkEnableOption "Fediversity configuration";
applications = mkOption {
description = "Collection of NixOS modules, each implementing a Fediversity application";
example.hello = {
enable = true;
module = { pkgs, ... }: {
environment.systemPackages = [ pkgs.hello ];
};
};
type = with types; attrsOf (submoduleWith {
class = "nixos";
description = "A Fediversity application";
modules = [
(application: {
options = {
enable = mkOption {
type = types.bool;
};
module = mkOption {
description = "The NixOS module to compose into an operator's configuration";
type = types.deferredModule;
};
components = mkOption {
type = with types; attrsOf (attrTag {
file-system-state = mkOption {
description = "files stored by the application";
type = with types; attrsOf (submodule {
options.minSize = mkOption { type = types.bytes; };
});
example = {
"/foo/bar/baz" = { minSize = 1024; };
};
};
});
};
};
})
];
});
};
}
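
A minimal evaluation sketch (not part of the diff) of how the options module above can be exercised with `lib.evalModules`, mirroring the pattern used in the `data-model-test.nix` file shown elsewhere in this diff. The `<nixpkgs>` lookup and the file path are stand-ins, and the `hello` application is taken from the option's own example.

```nix
# Sketch only: assumes nixpkgs is reachable via <nixpkgs> and that the module
# above is saved as ./data-model.nix.
let
  pkgs = import <nixpkgs> { };
  inherit (pkgs) lib;

  eval = lib.evalModules {
    modules = [
      ./data-model.nix
      {
        enable = true;
        # The `hello` application mirrors the example in the `applications` option.
        applications.hello = {
          enable = true;
          module =
            { pkgs, ... }:
            {
              environment.systemPackages = [ pkgs.hello ];
            };
        };
      }
    ];
  };
in
# Evaluates to `true` when the configuration is accepted by the data model.
eval.config.applications.hello.enable
```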

View file

@ -1,8 +0,0 @@
{
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
}

View file

@ -1,14 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}

View file

@ -1,36 +0,0 @@
{
inputs,
sources,
lib,
providers,
...
}:
let
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
in
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = lib.genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
}

View file

@ -0,0 +1,54 @@
{
self,
inputs,
lib,
...
}:
let
inherit (lib) genAttrs;
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = /. + (builtins.unsafeDiscardStringContext self);
pathFromRoot = ./.;
in
{
perSystem =
{ pkgs, ... }:
{
checks.deployment-basic = pkgs.testers.runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args.inputs = inputs;
inherit targetMachines pathToRoot pathFromRoot;
};
};
nixops4Deployments.check-deployment-basic =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
_module.args.inputs = inputs;
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
};
}

View file

@ -1,22 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-basic = {
imports = [ ./deployment/check/basic/deployment.nix ];
_module.args = { inherit inputs sources; };
};
}
);
}

View file

@ -1,15 +1,8 @@
{ inputs, lib, ... }:
{ inputs, ... }:
{
_class = "nixosTest";
name = "deployment-basic";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
];
nodes.deployer =
{ pkgs, ... }:
{
@ -17,12 +10,6 @@
inputs.nixops4.packages.${pkgs.system}.default
];
# FIXME: sad times
system.extraDependencies = with pkgs; [
jq
jq.inputDerivation
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{

View file

@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@ -0,0 +1 @@
## This is a placeholder file. It will be overwritten by the test.

View file

@ -1,59 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
}

View file

@ -0,0 +1,87 @@
{
self,
inputs,
lib,
...
}:
let
inherit (builtins) fromJSON readFile listToAttrs;
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = /. + (builtins.unsafeDiscardStringContext self);
pathFromRoot = ./.;
enableAcme = true;
in
{
perSystem =
{ pkgs, ... }:
{
checks.deployment-cli = pkgs.testers.runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args.inputs = inputs;
inherit
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
};
};
nixops4Deployments =
let
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args.inputs = inputs;
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
(fromJSON (readFile ../../configuration.sample.json) // args);
in
{
check-deployment-cli-nothing = makeTestDeployment { };
check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
mastodon.enable = true;
pixelfed.enable = true;
};
check-deployment-cli-peertube = makeTestDeployment {
peertube.enable = true;
};
};
}

View file

@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments = import ./deployment/check/cli/deployments.nix {
inherit inputs sources lib;
};
}
);
}

View file

@ -1,9 +1,4 @@
{
inputs,
hostPkgs,
lib,
...
}:
{ inputs, hostPkgs, ... }:
let
## Some places need a dummy file that will in fact never be used. We create
@ -12,25 +7,8 @@ let
in
{
_class = "nixosTest";
name = "deployment-cli";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployments.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in panel test)
../../default.nix
../../options.nix
../../configuration.sample.json
../../../services/fediversity
];
nodes.deployer =
{ pkgs, ... }:
{

View file

@ -3,7 +3,6 @@
lib,
pkgs,
config,
sources,
...
}:
@ -17,8 +16,6 @@ let
in
{
_class = "nixos";
imports = [ ./sharedOptions.nix ];
options.system.extraDependenciesFromModule = mkOption {
@ -54,13 +51,11 @@ in
system.extraDependencies =
[
inputs.nixops4
inputs.nixops4-nixos
inputs.nixpkgs
sources.flake-parts
sources.flake-inputs
sources.git-hooks
"${inputs.flake-parts}"
"${inputs.flake-parts.inputs.nixpkgs-lib}"
"${inputs.nixops4}"
"${inputs.nixops4-nixos}"
"${inputs.nixpkgs}"
pkgs.stdenv
pkgs.stdenvNoCC
@ -77,7 +72,7 @@ in
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";
_module.args = { inherit inputs sources; };
_module.args.inputs = inputs;
enableAcme = config.enableAcme;
acmeNodeIP = config.acmeNodeIP;
}

View file

@ -3,7 +3,6 @@
lib,
config,
hostPkgs,
sources,
...
}:
@ -13,7 +12,6 @@ let
toJSON
;
inherit (lib)
types
fileset
mkOption
genAttrs
@ -28,6 +26,14 @@ let
forConcat = xs: f: concatStringsSep "\n" (map f xs);
## The whole repository, with the flake at its root.
## FIXME: We could probably have fileset be the union of ./. with flake.nix
## and flake.lock - I doubt we need anything else.
src = fileset.toSource {
fileset = config.pathToRoot;
root = config.pathToRoot;
};
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
@ -36,8 +42,6 @@ let
in
{
_class = "nixosTest";
imports = [
./sharedOptions.nix
];
@ -46,46 +50,16 @@ in
## FIXME: I wish I could just use `testScript` but with something like
## `mkOrder` to put this module's string before something else.
extraTestScript = mkOption { };
sourceFileset = mkOption {
## REVIEW: Upstream to nixpkgs?
type = types.mkOptionType {
name = "fileset";
description = "fileset";
descriptionClass = "noun";
check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
merge = (_: defs: fileset.unions (map (x: x.value) defs));
};
description = ''
A fileset that will be copied to the deployer node in the current
working directory. This should contain all the files that are
necessary to run that particular test, such as the NixOS
modules necessary to evaluate a deployment.
'';
};
};
config = {
sourceFileset = fileset.unions [
# NOTE: not the flake itself; it will be overridden.
../../../mkFlake.nix
../../../flake.lock
../../../npins
./sharedOptions.nix
./targetNode.nix
./targetResource.nix
(config.pathToCwd + "/flake-under-test.nix")
];
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
nodes =
{
deployer = {
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
_module.args.inputs = inputs;
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
};
@ -112,7 +86,7 @@ in
genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
_module.args.inputs = inputs;
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
@ -126,16 +100,8 @@ in
${n}.wait_for_unit("multi-user.target")
'')}
## A subset of the repository that is necessary for this test. It will be
## copied inside the test. The smaller this set, the faster our CI, because we
## won't need to re-run when things change outside of it.
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${
fileset.toSource {
root = ../../..;
fileset = config.sourceFileset;
}
}/* .")
deployer.succeed("cp -r --no-preserve=mode ${src}/* .")
with subtest("Configure the network"):
${forConcat config.targetMachines (
@ -153,6 +119,7 @@ in
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
deployer.succeed(f"echo '{deployer_key}' > ${config.pathFromRoot}/deployer.pub")
${forConcat config.targetMachines (tm: ''
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
'')}
@ -165,16 +132,11 @@ in
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing to grab things directly from the host's store.
##
## NOTE: We use the repository as-is (cf `src` above), overriding only
## `flake.nix` by our `flake-under-test.nix`. We also override the flake
## lock file to use locally available inputs, as we cannot download them.
##
with subtest("Override the flake and its lock"):
deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
with subtest("Override the lock"):
deployer.succeed("""
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input flake-parts ${inputs.flake-parts} \
--override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
@ -186,6 +148,9 @@ in
inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
\
--override-input nixpkgs ${inputs.nixpkgs} \
--override-input git-hooks ${inputs.git-hooks} \
;
""")

View file

@ -11,7 +11,6 @@ let
inherit (lib) mkOption types;
in
# `config` not set and imported from multiple places: no fixed module class
{
options = {
targetMachines = mkOption {

View file

@ -12,8 +12,6 @@ let
in
{
_class = "nixos";
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")

View file

@ -2,7 +2,6 @@
inputs,
lib,
config,
sources,
...
}:
@ -13,8 +12,6 @@ let
in
{
_class = "nixops4Resource";
imports = [ ./sharedOptions.nix ];
options = {
@ -41,7 +38,7 @@ in
(lib.modules.importJSON (config.pathToCwd + "/${config.nodeName}-network.json"))
];
_module.args = { inherit inputs sources; };
_module.args.inputs = inputs;
enableAcme = config.enableAcme;
acmeNodeIP = trim (readFile (config.pathToCwd + "/acme_server_ip"));

View file

@ -1,11 +0,0 @@
{
targetMachines = [
"garage"
"mastodon"
"peertube"
"pixelfed"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@ -1,19 +0,0 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@ -1,58 +0,0 @@
{
inputs,
sources,
lib,
}:
let
inherit (builtins) fromJSON listToAttrs;
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
makeTargetResource = nodeName: {
imports = [ ../common/targetResource.nix ];
_module.args = { inherit inputs sources; };
inherit
nodeName
pathToRoot
pathFromRoot
enableAcme
;
};
## The deployment function - what we are here to test!
##
## TODO: Modularise `deployment/default.nix` to get rid of the nested
## function calls.
makeTestDeployment =
args:
(import ../..)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../../../services/fediversity;
}
(listToAttrs (
map (nodeName: {
name = "${nodeName}ConfigurationResource";
value = makeTargetResource nodeName;
}) targetMachines
))
args;
in
makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env == "" then
throw "The DEPLOYMENT environment variable needs to be set. You do not want to use this deployment outside of the `deployment-panel` NixOS test."
else
env
)
)

View file

@ -1,26 +0,0 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{
inputs,
sources,
lib,
...
}:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-panel = import ./deployment/check/panel/deployment.nix {
inherit inputs sources lib;
};
}
);
}

View file

@ -1,377 +0,0 @@
{
inputs,
lib,
hostPkgs,
config,
...
}:
let
inherit (lib)
getExe
;
## Some places need a dummy file that will in fact never be used. We create
## it here.
dummyFile = hostPkgs.writeText "dummy" "dummy";
panelPort = 8000;
panelUser = "test";
panelEmail = "test@test.com";
panelPassword = "ouiprdaaa43"; # panel's manager complains if too close to username or email
fediUser = "test";
fediEmail = "test@test.com";
fediPassword = "testtest";
fediName = "Testy McTestface";
toPythonBool = b: if b then "True" else "False";
interactWithPanel =
{
baseUri,
enableMastodon,
enablePeertube,
enablePixelfed,
}:
hostPkgs.writers.writePython3Bin "interact-with-panel"
{
libraries = with hostPkgs.python3Packages; [ selenium ];
flakeIgnore = [
"E302" # expected 2 blank lines, found 0
"E303" # too many blank lines
"E305" # expected 2 blank lines after end of function or class
"E501" # line too long (> 79 characters)
"E731" # do not assign lambda expression, use a def
];
}
''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
print("Create and configure driver...")
options = Options()
options.add_argument("--headless")
options.binary_location = "${getExe hostPkgs.firefox-unwrapped}"
service = webdriver.FirefoxService(executable_path="${getExe hostPkgs.geckodriver}")
driver = webdriver.Firefox(options=options, service=service)
driver.set_window_size(1280, 960)
driver.implicitly_wait(360)
driver.command_executor.set_timeout(3600)
print("Open login page...")
driver.get("${baseUri}/login/")
print("Enter username...")
driver.find_element(By.XPATH, "//input[@name = 'username']").send_keys("${panelUser}")
print("Enter password...")
driver.find_element(By.XPATH, "//input[@name = 'password']").send_keys("${panelPassword}")
print("Click Login button...")
driver.find_element(By.XPATH, "//button[normalize-space() = 'Login']").click()
print("Open configuration page...")
driver.get("${baseUri}/configuration/")
# Helpers to actually set and not add or switch input values.
def input_set(elt, keys):
elt.clear()
elt.send_keys(keys)
def checkbox_set(elt, new_value):
if new_value != elt.is_selected():
elt.click()
print("Enable Fediversity...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'enable']"), True)
print("Fill in initialUser info...")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.username']"), "${fediUser}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.password']"), "${fediPassword}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.email']"), "${fediEmail}")
input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.displayName']"), "${fediName}")
print("Enable services...")
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})
print("Start deployment...")
driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()
print("Wait for deployment status to show up...")
get_deployment_result = lambda d: d.find_element(By.XPATH, "//div[@id = 'deployment-result']//p")
WebDriverWait(driver, timeout=3660, poll_frequency=10).until(get_deployment_result)
deployment_result = get_deployment_result(driver).get_attribute('innerHTML')
print("Quit...")
driver.quit()
match deployment_result:
case 'Deployment Succeeded':
print("Deployment has succeeded; exiting normally")
exit(0)
case 'Deployment Failed':
print("Deployment has failed; exiting with return code `1`")
exit(1)
case _:
print(f"Unexpected deployment result: {deployment_result}; exiting with return code `2`")
exit(2)
'';
in
{
_class = "nixosTest";
name = "deployment-panel";
sourceFileset = lib.fileset.unions [
./constants.nix
./deployment.nix
# REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files
# in `/deployment`. Maybe we can think of a reorg making things more robust
# here? (comment also in CLI test)
../../default.nix
../../options.nix
../../../services/fediversity
];
## The panel's module sets `nixpkgs.overlays` which clashes with
## `pkgsReadOnly`. We disable it here.
node.pkgsReadOnly = false;
nodes.deployer =
{ pkgs, ... }:
{
imports = [
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;
domain = "deployer";
secrets = {
SECRET_KEY = dummyFile;
};
port = panelPort;
deployment = {
flake = "/run/fedipanel/flake";
name = "check-deployment-panel";
};
};
environment.systemPackages = [ pkgs.expect ];
## FIXME: The following dependencies are necessary but I do not
## understand why they are not covered by the fake node.
system.extraDependencies = with pkgs; [
peertube
peertube.inputDerivation
gixy # a configuration checker for nginx
gixy.inputDerivation
];
system.extraDependenciesFromModule = {
imports = [ ../../../services/fediversity ];
fediversity = {
domain = "fediversity.net"; # would write `dummy` but that would not typecheck
garage.enable = true;
mastodon = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
peertube = {
enable = true;
secretsFile = dummyFile;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
pixelfed = {
enable = true;
s3AccessKeyFile = dummyFile;
s3SecretKeyFile = dummyFile;
};
temp.cores = 1;
temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};
};
nodes.client =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
httpie
dnsutils # for `dig`
openssl
cacert
wget
python3
python3Packages.selenium
firefox-unwrapped
geckodriver
];
security.pki.certificateFiles = [
config.nodes.acme.test-support.acme.caCert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
};
## NOTE: The target machines may need more RAM than the default to handle
## being deployed to, otherwise we get something like:
##
## pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
## deployer # error: writing to file: No space left on device
## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
## deployer # Error: Could not create resource
##
## These values have been trimmed down to the gigabyte.
nodes.mastodon.virtualisation.memorySize = 4 * 1024;
nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
nodes.peertube.virtualisation.memorySize = 5 * 1024;
## FIXME: The test of presence of the services are very simple: we only
## check that there is a systemd service of the expected name on the
## machine. This proves at least that NixOps4 did something, and we cannot
## really do more for now because the services aren't actually working
## properly, in particular because of DNS issues. We should fix the services
## and check that they are working properly.
extraTestScript = ''
## TODO: We want a nicer way to control where the FediPanel consumes its
## flake, which can default to the store but could also be somewhere else if
## someone wanted to change the code of the flake.
##
with subtest("Give the panel access to the flake"):
deployer.succeed("mkdir /run/fedipanel /run/fedipanel/flake >&2")
deployer.succeed("cp -R . /run/fedipanel/flake >&2")
deployer.succeed("chown -R panel:panel /run/fedipanel >&2")
## TODO: I want a programmatic way to provide an SSH key to the panel (and
## therefore NixOps4). This should happen either in the Python code, but
## maybe it is fair that that one picks up on the user's key? It could
## also be in the Nix packaging.
##
with subtest("Set up the panel's SSH keys"):
deployer.succeed("mkdir /home/panel/.ssh >&2")
deployer.succeed("cp -R /root/.ssh/* /home/panel/.ssh >&2")
deployer.succeed("chown -R panel:panel /home/panel/.ssh >&2")
deployer.succeed("chmod 600 /home/panel/.ssh/* >&2")
## TODO: This is a hack to accept the root CA used by Pebble on the client
## machine. Pebble randomizes everything, so the only way to get it is to
## call the /roots/0 endpoint at runtime, leaving not much margin for a nice
## Nixy way of adding the certificate. There is no way around it as this is
## by design in Pebble, showing in fact that Pebble was not the appropriate
## tool for our use and that nixpkgs does not in fact provide an easy way to
## generate _usable_ certificates in NixOS tests. I suggest we merge this,
## and track the task to set it up in a cleaner way. I would tackle this in
## a subsequent PR, and hopefully even contribute this BetterWay(tm) to
## nixpkgs. — Niols
##
with subtest("Set up ACME root CA on client"):
client.succeed("""
cd /etc/ssl/certs
curl -o pebble-root-ca.pem https://acme.test:15000/roots/0
curl -o pebble-intermediate-ca.pem https://acme.test:15000/intermediates/0
{ cat ca-bundle.crt
cat pebble-root-ca.pem
cat pebble-intermediate-ca.pem
} > new-ca-bundle.crt
rm ca-bundle.crt ca-certificates.crt
mv new-ca-bundle.crt ca-bundle.crt
ln -s ca-bundle.crt ca-certificates.crt
""")
## TODO: I would hope for a more declarative way to add users. This should
## be handled by the Nix packaging of the FediPanel. — Niols
##
with subtest("Create panel user"):
deployer.succeed("""
expect -c '
spawn manage createsuperuser --username ${panelUser} --email ${panelEmail}
expect "Password: "; send "${panelPassword}\\n";
expect "Password (again): "; send "${panelPassword}\\n"
interact
' >&2
""")
with subtest("Check the status of the services - there should be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with no services enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = false;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - there should still be none"):
garage.fail("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with Mastodon and Pixelfed enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = true;
enablePeertube = false;
enablePixelfed = true;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
garage.succeed("systemctl status garage.service")
mastodon.succeed("systemctl status mastodon-web.service")
peertube.fail("systemctl status peertube.service")
pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
with subtest("Run deployment with only Peertube enabled"):
client.succeed("${
interactWithPanel {
baseUri = "https://deployer";
enableMastodon = false;
enablePeertube = true;
enablePixelfed = false;
}
}/bin/interact-with-panel >&2")
with subtest("Check the status of the services - expecting Garage and Peertube"):
garage.succeed("systemctl status garage.service")
mastodon.fail("systemctl status mastodon-web.service")
peertube.succeed("systemctl status peertube.service")
pixelfed.fail("systemctl status phpfpm-pixelfed.service")
'';
}

View file

@ -1,70 +0,0 @@
let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit inputs;
};
modules = [
module
./data-model.nix
];
}).config;
in
{
_class = "nix-unit";
test-eval = {
expr =
let
fediversity = eval (
{ config, ... }:
{
config = {
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options = {
enable = lib.mkEnableOption "Hello in the shell";
};
};
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
};
};
options = {
example-configuration = mkOption {
type = config.configuration;
readOnly = true;
default = {
enable = true;
applications.hello.enable = true;
};
};
};
}
);
in
{
inherit (fediversity)
example-configuration
;
};
expected = {
example-configuration = {
enable = true;
applications.hello.enable = true;
};
};
};
}

View file

@ -1,89 +0,0 @@
{
lib,
config,
...
}:
let
inherit (lib) mkOption types;
inherit (lib.types)
attrsOf
attrTag
deferredModuleWith
submodule
optionType
functionTo
;
functionType = import ./function.nix;
application-resources = {
options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
);
};
};
in
{
_class = "nixops4Deployment";
options = {
applications = mkOption {
description = "Collection of Fediversity applications";
type = attrsOf (
submodule (application: {
_class = "fediversity-application";
options = {
description = mkOption {
description = "Description to be shown in the application overview";
type = types.str;
};
module = mkOption {
description = "Operator-facing configuration options for the application";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-application-config"; } ]; };
};
implementation = mkOption {
description = "Mapping of application configuration to deployment resources, a description of what an application needs to run";
type = application.config.config-mapping.function-type;
};
resources = mkOption {
description = "Compute resources required by an application";
type = functionTo application.config.config-mapping.output-type;
readOnly = true;
default = input: (application.config.implementation input).output;
};
config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType;
readOnly = true;
default = {
input-type = application.config.module;
output-type = application-resources;
};
};
};
})
);
};
configuration = mkOption {
description = "Configuration type declaring options to be set by operators";
type = optionType;
readOnly = true;
default = submodule {
options = {
enable = lib.mkEnableOption {
description = "your Fediversity configuration";
};
applications = lib.mapAttrs (
_name: application:
mkOption {
description = application.description;
type = submodule application.module;
default = { };
}
) config.applications;
};
};
};
};
}

View file

@ -65,8 +65,6 @@ let
cfg = config.deployment;
in
{
_class = "nixops4Deployment";
options = {
deployment = lib.mkOption {
description = ''

View file

@ -1,26 +1,6 @@
{ inputs, sources, ... }:
{
_class = "flake";
perSystem =
{ pkgs, ... }:
{
checks = {
deployment-basic = import ./check/basic {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-cli = import ./check/cli {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-panel = import ./check/panel {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
};
};
imports = [
./check/basic/flake-part.nix
./check/cli/flake-part.nix
];
}

View file

@ -1,37 +0,0 @@
/**
Modular function type
*/
{ config, lib, ... }:
let
inherit (lib) mkOption types;
inherit (types)
deferredModule
submodule
functionTo
optionType
;
in
{
options = {
input-type = mkOption {
type = deferredModule;
};
output-type = mkOption {
type = deferredModule;
};
function-type = mkOption {
type = optionType;
readOnly = true;
default = functionTo (submodule {
options = {
input = mkOption {
type = submodule config.input-type;
};
output = mkOption {
type = submodule config.output-type;
};
};
});
};
};
}

View file

@ -17,8 +17,6 @@ let
inherit (lib) types mkOption;
in
{
_class = "nixops4Deployment";
options = {
enable = lib.mkEnableOption "Fediversity configuration";
domain = mkOption {

View file

@ -0,0 +1,41 @@
{
lib,
...
}:
let
inherit (lib) types mkOption;
in
{
options = {
infrastructure = mkOption {
description = "Infrastructure for Fediversity applications to run on";
type =
with types;
attrsOf (attrTag {
single-nixos-machine-via-usb = mkOption {
type =
with types;
submodule {
# TODO: maybe steal some data structures from NixOS
options = {
hasNetwork = mkOption {
type = types.bool;
};
disks = mkOption {
type =
with types;
attrsOf (submodule {
options.size = mkOption {
type = types.bytes;
};
});
};
};
};
};
});
};
};
}
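
As a rough illustration (not part of the diff), an operator-side value for the `infrastructure` option declared above could look like the following; the machine name and the disk size are invented for the example.

```nix
# Hypothetical value for the `infrastructure` option; "machine1" and the
# disk size are made-up illustrations, not taken from the repository.
{
  infrastructure.machine1.single-nixos-machine-via-usb = {
    hasNetwork = true;
    disks.main.size = 500 * 1000 * 1000 * 1000; # roughly 500 GB, in bytes
  };
}
```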

flake.lock (generated, 140 lines changed)
View file

@ -59,6 +59,22 @@
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1733328505,
@ -74,7 +90,7 @@
"type": "github"
}
},
"flake-compat_3": {
"flake-compat_4": {
"flake": false,
"locked": {
"lastModified": 1696426674,
@ -127,6 +143,24 @@
}
},
"flake-parts_3": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib_3"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_4": {
"inputs": {
"nixpkgs-lib": [
"nixops4-nixos",
@ -167,12 +201,32 @@
"type": "github"
}
},
"git-hooks-nix": {
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1742649964,
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"git-hooks-nix": {
"inputs": {
"flake-compat": "flake-compat_2",
"gitignore": "gitignore_2",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1737465171,
"narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",
@ -227,6 +281,27 @@
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"gitignore_2": {
"inputs": {
"nixpkgs": [
"nixops4-nixos",
@ -266,8 +341,8 @@
},
"nix": {
"inputs": {
"flake-compat": "flake-compat_2",
"flake-parts": "flake-parts_3",
"flake-compat": "flake-compat_3",
"flake-parts": "flake-parts_4",
"git-hooks-nix": "git-hooks-nix_2",
"nixfmt": "nixfmt",
"nixpkgs": [
@ -341,10 +416,10 @@
},
"nixops4": {
"inputs": {
"flake-parts": "flake-parts_2",
"flake-parts": "flake-parts_3",
"nix": "nix",
"nix-cargo-integration": "nix-cargo-integration",
"nixpkgs": "nixpkgs_2",
"nixpkgs": "nixpkgs_3",
"nixpkgs-old": "nixpkgs-old"
},
"locked": {
@ -363,7 +438,7 @@
},
"nixops4-nixos": {
"inputs": {
"flake-parts": "flake-parts",
"flake-parts": "flake-parts_2",
"git-hooks-nix": "git-hooks-nix",
"nixops4": "nixops4",
"nixops4-nixos": [
@ -445,6 +520,18 @@
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-lib_3": {
"locked": {
"lastModified": 1738452942,
"narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
}
},
"nixpkgs-old": {
"locked": {
"lastModified": 1735563628,
@ -478,6 +565,22 @@
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1730768919,
"narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1738410390,
"narHash": "sha256-xvTo0Aw0+veek7hvEVLzErmJyQkEcRk6PSR4zsRQFEc=",
@ -493,6 +596,22 @@
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1740463929,
"narHash": "sha256-4Xhu/3aUdCKeLfdteEHMegx5ooKQvwPHNkOgNCXQrvc=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "5d7db4668d7a0c6cc5fc8cf6ef33b008b2b1ed8b",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"parts": {
"inputs": {
"nixpkgs-lib": [
@ -518,7 +637,7 @@
},
"purescript-overlay": {
"inputs": {
"flake-compat": "flake-compat_3",
"flake-compat": "flake-compat_4",
"nixpkgs": [
"nixops4-nixos",
"nixops4",
@ -561,11 +680,14 @@
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"git-hooks": "git-hooks",
"nixops4": [
"nixops4-nixos",
"nixops4"
],
"nixops4-nixos": "nixops4-nixos"
"nixops4-nixos": "nixops4-nixos",
"nixpkgs": "nixpkgs_4"
}
},
"rust-overlay": {

View file

@ -1,49 +1,72 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11"; # consumed by flake-parts
flake-parts.url = "github:hercules-ci/flake-parts";
git-hooks.url = "github:cachix/git-hooks.nix";
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
"${sources.git-hooks}/flake-module.nix"
inputs.nixops4.modules.flake.default
inputs@{ flake-parts, ... }:
let
sources = import ./npins;
inherit (sources) git-hooks agenix;
in
flake-parts.lib.mkFlake { inherit inputs; } {
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
./deployment/flake-part.nix
./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
];
imports = [
(import "${git-hooks}/flake-module.nix")
inputs.nixops4.modules.flake.default
perSystem =
{
pkgs,
lib,
...
}:
{
formatter = pkgs.nixfmt-rfc-style;
./deployment/flake-part.nix
./infra/flake-part.nix
];
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
perSystem =
{
pkgs,
lib,
inputs',
...
}:
{
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
devShells.default = pkgs.mkShell {
packages = [
pkgs.npins
pkgs.nil
(pkgs.callPackage "${agenix}/pkgs/agenix.nix" { })
pkgs.openssh
pkgs.httpie
pkgs.jq
# exposing this env var as a hack to pass info in from the form
(inputs'.nixops4.packages.default.overrideAttrs {
impureEnvVars = [ "DEPLOYMENT" ];
})
];
};
}
);
};
};
}

View file

@ -14,7 +14,7 @@ everything will become much cleaner.
above 100. For instance, `fedi117`.
2. Add a basic configuration for the machine. These typically go in
`machines/dev/<name>/default.nix`. You can look at other `fediXXX` VMs to
`infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
find inspiration. You probably do not need a `nixos.module` option at this
point.
@ -48,7 +48,7 @@ everything will become much cleaner.
7. Regenerate the list of machines:
```
sh machines/machines.md.sh
sh infra/machines.md.sh
```
Commit it with the machine's configuration, public key, etc.

View file

@ -5,9 +5,8 @@ let
in
{
_class = "nixos";
imports = [
./hardware.nix
./networking.nix
./users.nix
];
@ -23,9 +22,4 @@ in
nix.extraOptions = ''
experimental-features = nix-command flakes
'';
boot.loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
}

View file

@ -1,27 +1,20 @@
let
# Pulling this in manually, rather than taking it from the module args, resolves an infinite recursion.
# FIXME: instead untangle `//infra/flake-part.nix` and make it stop passing wild functions.
# Move towards a portable-services-like pattern where some things are submodules.
# Right now those wild functions are for parameterising a bunch of things,
# and the modular way to do that would be options --
# obviously you can't use those for `imports`,
# so one way to decouple fixpoints is to isolate them into submodules.
# Therefore one approach would be to try to go down the call graph,
# and see where what's currently a function could be a `submodule` field of something else.
sources = import ../../npins;
in
{
_class = "nixos";
{ modulesPath, ... }:
imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
];
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot = {
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
initrd = {
availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];

View file

@ -1,64 +1,63 @@
{ config, lib, ... }:
let
inherit (lib) mkDefault mkIf mkMerge;
inherit (lib) mkDefault;
in
{
_class = "nixos";
config = {
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = mkMerge [
{
hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain;
networking = {
hostName = config.fediversityVm.name;
domain = config.fediversityVm.domain;
## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true;
## REVIEW: Do we actually need that, considering that we have static IPs?
useDHCP = mkDefault true;
## Disable the default firewall and use nftables instead, with a custom
## Procolix-made ruleset.
firewall.enable = false;
nftables = {
enable = true;
rulesetFile = ./nftables-ruleset.nft;
interfaces = {
eth0 = {
ipv4 = {
addresses = [
{
inherit (config.fediversityVm.ipv4) address prefixLength;
}
];
};
ipv6 = {
addresses = [
{
inherit (config.fediversityVm.ipv6) address prefixLength;
}
];
};
};
}
};
## IPv4
(mkIf config.fediversityVm.ipv4.enable {
interfaces.${config.fediversityVm.ipv4.interface}.ipv4.addresses = [
{ inherit (config.fediversityVm.ipv4) address prefixLength; }
];
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = config.fediversityVm.ipv4.interface;
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
];
})
defaultGateway = {
address = config.fediversityVm.ipv4.gateway;
interface = "eth0";
};
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = "eth0";
};
## IPv6
(mkIf config.fediversityVm.ipv6.enable {
interfaces.${config.fediversityVm.ipv6.interface}.ipv6.addresses = [
{ inherit (config.fediversityVm.ipv6) address prefixLength; }
];
defaultGateway6 = {
address = config.fediversityVm.ipv6.gateway;
interface = config.fediversityVm.ipv6.interface;
};
nameservers = [
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
})
];
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
firewall.enable = false;
nftables = {
enable = true;
rulesetFile = ./nftables-ruleset.nft;
};
};
};
}

View file

@ -1,6 +1,4 @@
{
_class = "nixos";
users.users = {
procolix = {
isNormalUser = true;

View file

@ -6,8 +6,6 @@ let
in
{
# `config` not set and imported from multiple places: no fixed module class
options.fediversityVm = {
##########################################################################
@ -91,17 +89,6 @@ in
};
ipv4 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv4 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 4. It will be injected as a
@ -127,17 +114,6 @@ in
};
ipv6 = {
enable = mkOption {
default = true;
};
interface = mkOption {
description = ''
The interface that carries the machine's IPv6 network.
'';
default = "eth0";
};
address = mkOption {
description = ''
The IP address of the machine, version 6. It will be injected as a

View file

@ -1,10 +1,6 @@
{
inputs,
lib,
config,
sources,
keys,
secrets,
...
}:
@ -12,11 +8,15 @@ let
inherit (lib) attrValues elem mkDefault;
inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix;
sources = import ../../npins;
inherit (sources) nixpkgs agenix disko;
secretsPrefix = ../../secrets;
secrets = import (secretsPrefix + "/secrets.nix");
keys = import ../../keys;
in
{
_class = "nixops4Resource";
imports = [ ./options.nix ];
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
@ -26,15 +26,15 @@ in
hostPublicKey = config.fediversityVm.hostPublicKey;
};
inherit (inputs) nixpkgs;
inherit nixpkgs;
## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else
## should go into the `./nixos` subdirectory.
nixos.module = {
imports = [
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
(import "${agenix}/modules/age.nix")
(import "${disko}/module.nix")
./options.nix
./nixos
];
@ -43,15 +43,15 @@ in
## configuration.
fediversityVm = config.fediversityVm;
## Read all the secrets, filter the ones that are supposed to be readable with
## public key, and create a mapping from `<name>.file` to the absolute path of
## the secret's file.
## Read all the secrets, filter the ones that are supposed to be readable
## with this host's public key, and add them correctly to the configuration
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
${removeSuffix ".age" name}.file = secrets.rootPath + "/${name}";
}
) secrets.mapping;
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
})
) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
## supports users with password-less sudo.

View file

@ -1,9 +1,6 @@
{
inputs,
lib,
sources,
keys,
secrets,
...
}:
@ -16,6 +13,7 @@ let
filterAttrs
;
inherit (lib.attrsets) genAttrs;
sources = import ../../npins;
## Given a machine's name and whether it is a test VM, make a resource module,
## except for its missing provider. (Depending on the use of that resource, we
@ -23,20 +21,6 @@ let
makeResourceModule =
{ vmName, isTestVm }:
{
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch flake-parts and have our own data model for how the project is organised internally
_module.args = {
inherit
inputs
sources
keys
secrets
;
};
nixos.module.imports = [
./common/proxmox-qemu-vm.nix
];
imports =
[
./common/resource.nix
@ -44,17 +28,17 @@ let
++ (
if isTestVm then
[
../machines/operator/${vmName}
./test-machines/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
# allow our panel vm access to the test machines
keys.panel
(import ../keys).panel
];
}
]
else
[
../machines/dev/${vmName}
./machines/${vmName}
]
);
fediversityVm.name = vmName;
@ -163,13 +147,11 @@ let
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev;
testMachines = listSubdirectories ../machines/operator;
machines = listSubdirectories ./machines;
testMachines = listSubdirectories ./test-machines;
in
{
_class = "flake";
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment.

View file

@ -7,10 +7,9 @@ Currently, this repository keeps track of the following VMs:
Machine | Proxmox | Description
--------|---------|-------------
[`fedi200`](./dev/fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./dev/fedi201) | fediversity | FediPanel
[`vm02116`](./dev/vm02116) | procolix | Forgejo
[`vm02187`](./dev/vm02187) | procolix | Wiki
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
[`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./vm02187) | procolix | Wiki
This table excludes all machines with names starting with `test`.

View file

@ -20,7 +20,7 @@ vmOptions=$(
cd ..
nix eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ../.)).vmOptions
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
" \
--log-format raw --quiet
)
@ -32,12 +32,11 @@ for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)
# shellcheck disable=SC2016
printf '[`%s`](./dev/%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
fi
done
cat <<\EOF
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |
This table excludes all machines with names starting with `test`.
EOF

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 200;
proxmox = "fediversity";
@ -16,10 +14,4 @@
gateway = "2a00:51c0:13:1305::1";
};
};
nixos.module = {
imports = [
../../../infra/common/proxmox-qemu-vm.nix
];
};
}

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 201;
proxmox = "fediversity";
@ -19,7 +17,6 @@
nixos.module = {
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./fedipanel.nix
];
};

View file

@ -4,14 +4,10 @@
}:
let
name = "panel";
sources = import ../../../npins;
in
{
_class = "nixos";
imports = [
(import ../../../panel { }).module
(import "${sources.home-manager}/nixos")
];
security.acme = {

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 2116;
proxmox = "procolix";
@ -14,7 +12,6 @@
{ lib, ... }:
{
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./forgejo.nix
];

View file

@ -5,8 +5,6 @@ let
in
{
_class = "nixos";
services.forgejo = {
enable = true;

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 2187;
proxmox = "procolix";
@ -14,7 +12,6 @@
{ lib, ... }:
{
imports = [
../../../infra/common/proxmox-qemu-vm.nix
./wiki.nix
];

View file

@ -1,8 +1,6 @@
{ config, ... }:
{
_class = "nixos";
services.phpfpm.pools.mediawiki.phpOptions = ''
upload_max_filesize = 1024M;
post_max_size = 1024M;

View file

@ -15,6 +15,7 @@ let
installer =
{
config,
pkgs,
lib,
...

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7001;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7002;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7003;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7004;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7005;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7006;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7011;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7012;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7013;
proxmox = "fediversity";

View file

@ -1,6 +1,4 @@
{
_class = "nixops4Resource";
fediversityVm = {
vmId = 7014;
proxmox = "fediversity";

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlsYTtMx3hFO8B5B8iHaXL2JKj9izHeC+/AMhIWXBPs cd-age

View file

@ -35,5 +35,4 @@ in
contributors = collectKeys ./contributors;
systems = collectKeys ./systems;
panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub);
cd = removeTrailingWhitespace (readFile ./cd-ssh-key.pub);
}

View file

@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.keys = import ./.;
}

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFXQW5fxJoNY9wtTMsNExgbAbvyljIRGBLjY+USh/0A

View file

@ -1,4 +0,0 @@
# Machines
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.

View file

@ -1,70 +0,0 @@
{ lib, ... }:
let
inherit (lib) mkDefault mkForce;
in
{
_class = "nixops4Resource";
# NOTE: This needs an SSH config entry `forgejo-ci` to locate and access the
# machine. This is because different people access the machine in different
# ways (e.g. via a proxy vs. via Procolix's VPN). This might look like:
#
# Host forgejo-ci
# HostName 45.142.234.216
# HostKeyAlias forgejo-ci
#
# The `HostKeyAlias` statement is crucial. Without it, deployment will fail
# with the SSH error “Host key verification failed”.
ssh.host = mkForce "forgejo-ci";
fediversityVm = {
domain = "procolix.com";
ipv4 = {
interface = "enp1s0f0";
address = "192.168.201.65";
prefixLength = 24;
gateway = "192.168.201.1";
};
ipv6.enable = false;
};
nixos.module =
{ config, ... }:
{
_class = "nixos";
imports = [
./forgejo-actions-runner.nix
];
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
networking = {
nftables.enable = mkForce false;
hostId = "1d6ea552";
};
## NOTE: This is a physical machine, so it is not covered by disko
fileSystems."/" = lib.mkForce {
device = "rpool/root";
fsType = "zfs";
};
fileSystems."/home" = {
device = "rpool/home";
fsType = "zfs";
};
fileSystems."/boot" = lib.mkForce {
device = "/dev/disk/by-uuid/50B2-DD3F";
fsType = "vfat";
options = [
"fmask=0077"
"dmask=0077"
];
};
};
}

View file

@ -1,47 +0,0 @@
{ pkgs, config, ... }:
{
_class = "nixos";
services.gitea-actions-runner = {
package = pkgs.forgejo-actions-runner;
instances.default = {
enable = true;
name = config.networking.fqdn;
url = "https://git.fediversity.eu";
tokenFile = config.age.secrets.forgejo-runner-token.path;
settings = {
log.level = "info";
runner = {
file = ".runner";
# Take only 1 job at a time to avoid clashing NixOS tests, see #362
capacity = 1;
timeout = "3h";
insecure = false;
fetch_timeout = "5s";
fetch_interval = "2s";
};
};
## This runner supports Docker (with a default Debian-based Node image) and native
## modes. In native mode, it contains a few default packages.
labels = [
"docker:docker://node:16-bullseye"
"native:host"
];
hostPackages = with pkgs; [
bash
git
nix
nodejs
];
};
};
## For the Docker mode of the runner.
virtualisation.docker.enable = true;
}
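
The `tokenFile` above points at an agenix-managed secret. A hedged sketch of the corresponding declaration elsewhere in this machine's configuration (the path to the encrypted `.age` file is an assumption; only the secret name is taken from the config above):

```nix
# Sketch of the agenix secret backing config.age.secrets.forgejo-runner-token;
# the relative path to the encrypted file is an assumption.
{
  age.secrets.forgejo-runner-token.file = ../../secrets/forgejo-runner-token.age;
}
```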

View file

@ -1,54 +0,0 @@
## This file contains a tweak of flake-parts's `mkFlake` function to splice in
## sources taken from npins.
## NOTE: Much of the logic in this file feels like it should not be super
## specific to Fediversity. Could it make sense to extract the core of this to
## another place closer to it in spirit, such as @fricklerhandwerk's
## flake-inputs (which this code already depends on anyway, and which already
## contained two distinct helpers for migrating away from flakes)? cf
## https://git.fediversity.eu/Fediversity/Fediversity/pulls/447#issuecomment-8671
inputs@{ self, ... }:
let
sources = import ./npins;
inherit (import sources.flake-inputs) import-flake;
# XXX(@fricklerhandwerk): this atrocity is required to splice in a foreign Nixpkgs via flake-parts
# XXX - this is just importing a flake
nixpkgs = import-flake { src = sources.nixpkgs; };
# XXX - this overrides the inputs attached to `self`
inputs' = self.inputs // {
nixpkgs = nixpkgs;
};
self' = self // {
inputs = inputs';
};
flake-parts-lib = import "${sources.flake-parts}/lib.nix" { inherit (nixpkgs) lib; };
in
flakeModule:
flake-parts-lib.mkFlake
{
# XXX - finally we override the overall set of `inputs` -- we need both:
# `flake-parts` obtains `nixpkgs` from `self.inputs` and not from `inputs`.
inputs = inputs // {
inherit nixpkgs;
};
self = self';
specialArgs = {
inherit sources;
};
}
{
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
imports = [ flakeModule ];
}
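
The wrapper above is a curried function that takes the flake's `inputs` and then a flake-parts module. A top-level `flake.nix` could have invoked it roughly as follows (the file names are assumptions; only the calling convention follows from the signature above):

```nix
# Hypothetical flake.nix using the npins-splicing wrapper; the file names
# mkFlake.nix and flake-module.nix are assumptions.
{
  outputs = inputs: import ./mkFlake.nix inputs ./flake-module.nix;
}
```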

View file

@ -25,38 +25,6 @@
"url": null,
"hash": "1w2gsy6qwxa5abkv8clb435237iifndcxq0s79wihqw11a5yb938"
},
"disko": {
"type": "GitRelease",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "disko"
},
"pre_releases": false,
"version_upper_bound": null,
"release_prefix": null,
"submodules": false,
"version": "v1.12.0",
"revision": "7121f74b976481bc36877abaf52adab2a178fcbe",
"url": "https://api.github.com/repos/nix-community/disko/tarball/v1.12.0",
"hash": "0wbx518d2x54yn4xh98cgm65wvj0gpy6nia6ra7ns4j63hx14fkq"
},
"flake-inputs": {
"type": "GitRelease",
"repository": {
"type": "GitHub",
"owner": "fricklerhandwerk",
"repo": "flake-inputs"
},
"pre_releases": false,
"version_upper_bound": null,
"release_prefix": null,
"submodules": false,
"version": "4.1",
"revision": "ad02792f7543754569fe2fd3d5787ee00ef40be2",
"url": "https://api.github.com/repos/fricklerhandwerk/flake-inputs/tarball/4.1",
"hash": "1j57avx2mqjnhrsgq3xl7ih8v7bdhz1kj3min6364f486ys048bm"
},
"flake-parts": {
"type": "Git",
"repository": {
@ -96,19 +64,6 @@
"url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz",
"hash": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w"
},
"home-manager": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "home-manager"
},
"branch": "master",
"submodules": false,
"revision": "863842639722dd12ae9e37ca83bcb61a63b36f6c",
"url": "https://github.com/nix-community/home-manager/archive/863842639722dd12ae9e37ca83bcb61a63b36f6c.tar.gz",
"hash": "0rw9n8d4v87pzlmw7ws15f0sldb51fd9528skpbzmrzl4pinsgij"
},
"htmx": {
"type": "GitRelease",
"repository": {

View file

@ -20,15 +20,8 @@ in
packages = [
pkgs.npins
manage
# NixOps4 and its dependencies
pkgs.nixops4
pkgs.nix
pkgs.openssh
];
env = {
DEPLOYMENT_FLAKE = toString ../.;
DEPLOYMENT_NAME = "test";
env = import ./env.nix { inherit lib pkgs; } // {
NPINS_DIRECTORY = toString ../npins;
CREDENTIALS_DIRECTORY = toString ./.credentials;
DATABASE_URL = "sqlite:///${toString ./src}/db.sqlite3";

panel/env.nix Normal file (18 lines added)
View file

@ -0,0 +1,18 @@
{
lib,
pkgs,
...
}:
let
inherit (builtins) toString;
in
{
REPO_DIR = toString ../.;
BIN_PATH = lib.makeBinPath [
# explicitly use nix, as e.g. lix does not have configurable-impure-env
pkgs.nix
# without git, nixops errors (possibly due to our flake git hook): executing 'git': No such file or directory
pkgs.git
];
}

View file

@ -23,9 +23,7 @@ let
cfg = config.services.${name};
package = pkgs.callPackage ./package.nix { };
environment = {
DEPLOYMENT_FLAKE = cfg.deployment.flake;
DEPLOYMENT_NAME = cfg.deployment.name;
environment = import ../env.nix { inherit lib pkgs; } // {
DATABASE_URL = "sqlite:////var/lib/${name}/db.sqlite3";
USER_SETTINGS_FILE = pkgs.concatText "configuration.py" [
((pkgs.formats.pythonVars { }).generate "settings.py" cfg.settings)
@ -76,8 +74,6 @@ in
# https://git.dgnum.eu/mdebray/djangonix/
# unlicensed at the time of writing, but surely worth taking some inspiration from...
{
_class = "nixos";
options.services.${name} = {
enable = mkEnableOption "Service configuration for `${name}`";
production = mkOption {
@ -137,35 +133,6 @@ in
type = types.attrsOf types.path;
default = { };
};
nixops4Package = mkOption {
type = types.package;
description = ''
A package providing NixOps4.
TODO: This should not be at the level of the NixOS module, but instead
at the level of the panel's package. Until one finds a way to grab
NixOps4 from the package's npins-based code, we will have to make do with
this workaround.
'';
default = pkgs.nixops4;
};
deployment = {
flake = mkOption {
type = types.path;
default = ../..;
description = ''
The path to the flake containing the deployment. This is used to run the deployment button.
'';
};
name = mkOption {
type = types.str;
default = "test";
description = ''
The name of the deployment within the flake.
'';
};
};
};
config = mkIf cfg.enable {
@ -202,8 +169,9 @@ in
};
};
# needed to place a config file with home-manager
users.users.${name}.isNormalUser = true;
users.users.${name} = {
isNormalUser = true;
};
users.groups.${name} = { };
systemd.services.${name} = {
@ -213,11 +181,6 @@ in
path = [
python-environment
manage-service
## NixOps4 and its dependencies
cfg.nixops4Package
pkgs.nix
pkgs.openssh
];
preStart = ''
# Auto-migrate on first run or if the package has changed

View file

@ -8,17 +8,4 @@ let
in
{
python3 = prev.lib.attrsets.recursiveUpdate prev.python3 { pkgs = extraPython3Packages; };
nixops4 =
let
sources = import ../../npins;
inherit (import sources.flake-inputs) import-flake;
inherit
(import-flake {
src = ../../.;
})
inputs
;
inherit (inputs) nixops4;
in
nixops4.packages.${prev.system}.default;
}

Some files were not shown because too many files have changed in this diff.