data model: add TF test #4

Closed
kiara wants to merge 76 commits from data-model-tf-test into main
63 changed files with 1411 additions and 229 deletions

View file

@ -56,3 +56,47 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -L - run: nix build .#checks.x86_64-linux.deployment-panel -L
check-deployment-model-ssh:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-ssh -L
check-deployment-model-nixops4:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-nixops4 -L
check-deployment-model-tf:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf -L
## NOTE: NixOps4 does not provide a good “dry run” mode, so we instead check
## proxies for the resources, namely whether their `.#vmOptions.<machine>` and
## `.#nixosConfigurations.<machine>` outputs evaluate and build correctly, and
## whether we can dry-run `infra/proxmox-*.sh` on them. This will not catch
## everything, in particular not issues in how NixOps4 wires up the
## resources, but it is better than nothing.
check-resources:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: |
set -euC
echo ==================== [ VM Options ] ====================
machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).vmOptions)')
for machine in $machines; do
echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
nix build .#checks.x86_64-linux.vmOptions-$machine
done
echo
echo ==================== [ NixOS Configurations ] ====================
machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).nixosConfigurations)')
for machine in $machines; do
echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
nix build .#checks.x86_64-linux.nixosConfigurations-$machine
done

View file

@ -13,7 +13,7 @@ jobs:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Update pins - name: Update pins
run: nix-shell --run "npins update" run: nix-shell --run "npins --verbose update"
- name: Create PR - name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397 uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
with: with:

View file

@ -11,7 +11,8 @@ let
; ;
inherit (pkgs) lib; inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake; inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4; inputs = (import-flake { src = ./.; }).inputs;
inherit (inputs) nixops4;
panel = import ./panel { inherit sources system; }; panel = import ./panel { inherit sources system; };
pre-commit-check = pre-commit-check =
(import "${git-hooks}/nix" { (import "${git-hooks}/nix" {
@ -78,6 +79,7 @@ in
# re-export inputs so they can be overridden granularly # re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way) # (they can't be accessed from the outside any other way)
inherit inherit
inputs
sources sources
system system
pkgs pkgs

View file

@ -5,4 +5,5 @@
]; ];
pathToRoot = ../../..; pathToRoot = ../../..;
pathFromRoot = ./.; pathFromRoot = ./.;
useFlake = true;
} }

View file

@ -10,5 +10,10 @@ runNixOSTest {
./nixosTest.nix ./nixosTest.nix
]; ];
_module.args = { inherit inputs sources; }; _module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot; inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
useFlake
;
} }

View file

@ -1,4 +1,9 @@
{ inputs, lib, ... }: {
inputs,
lib,
config,
...
}:
{ {
_class = "nixosTest"; _class = "nixosTest";
@ -8,6 +13,7 @@
sourceFileset = lib.fileset.unions [ sourceFileset = lib.fileset.unions [
./constants.nix ./constants.nix
./deployment.nix ./deployment.nix
(config.pathToCwd + "/flake-under-test.nix")
]; ];
nodes.deployer = nodes.deployer =

View file

@ -8,4 +8,5 @@
pathToRoot = ../../..; pathToRoot = ../../..;
pathFromRoot = ./.; pathFromRoot = ./.;
enableAcme = true; enableAcme = true;
useFlake = true;
} }

View file

@ -15,5 +15,6 @@ runNixOSTest {
pathToRoot pathToRoot
pathFromRoot pathFromRoot
enableAcme enableAcme
useFlake
; ;
} }

View file

@ -1,6 +1,7 @@
{ {
inputs, inputs,
hostPkgs, hostPkgs,
config,
lib, lib,
... ...
}: }:
@ -19,6 +20,7 @@ in
sourceFileset = lib.fileset.unions [ sourceFileset = lib.fileset.unions [
./constants.nix ./constants.nix
./deployments.nix ./deployments.nix
(config.pathToCwd + "/flake-under-test.nix")
# REVIEW: I would like to be able to grab all of `/deployment` minus # REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files # `/deployment/check`, but I can't because there is a bunch of other files

View file

@ -0,0 +1,15 @@
{
lib,
...
}:
let
inherit (lib) types;
in
{
options = {
host = lib.mkOption {
type = types.str;
description = "name of the host to deploy to";
};
};
}

View file

@ -0,0 +1,214 @@
{
config,
system,
inputs ? (import ../../../default.nix { }).inputs,
sources ? import ../../../npins,
...
}:
let
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
deployment-config = config;
inherit (deployment-config) nodeName;
inherit (lib) mkOption types;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit pkgs inputs;
};
modules = [
module
../../data-model.nix
];
}).config;
fediversity = eval (
{ config, ... }:
{
config = {
resources.login-shell = {
description = "The operator needs to be able to log into the shell";
request =
{ ... }:
{
_class = "fediversity-resource-request";
options = {
wheel = mkOption {
description = "Whether the login user needs root permissions";
type = types.bool;
default = false;
};
packages = mkOption {
description = "Packages that need to be available in the user environment";
type = with types; attrsOf package;
};
};
};
policy =
{ config, ... }:
{
_class = "fediversity-resource-policy";
options = {
username = mkOption {
description = "Username for the operator";
type = types.str; # TODO: use the proper constraints from NixOS
};
wheel = mkOption {
description = "Whether to allow login with root permissions";
type = types.bool;
default = false;
};
};
config = {
resource-type = types.raw; # TODO: splice out the user type from NixOS
apply =
requests:
let
# Filter out requests that need wheel if policy doesn't allow it
validRequests = lib.filterAttrs (
_name: req: !req.login-shell.wheel || config.wheel
) requests.resources;
in
lib.optionalAttrs (validRequests != { }) {
${config.username} = {
isNormalUser = true;
packages =
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
};
};
};
};
};
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options.enable = lib.mkEnableOption "Hello in the shell";
};
implementation = cfg: {
input = cfg;
output.resources = lib.optionalAttrs cfg.enable {
hello.login-shell.packages.hello = pkgs.hello;
};
};
};
environments =
let
mkNixosConfiguration =
environment: requests:
{ ... }:
{
imports = [
./data-model-options.nix
../common/sharedOptions.nix
../common/targetNode.nix
"${nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
];
users.users = environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
in
{
single-nixos-vm-ssh = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation = requests: {
input = requests;
output.ssh-host = {
nixos-configuration = mkNixosConfiguration environment requests;
ssh = {
username = "root";
host = nodeName;
key-file = null;
};
};
};
};
single-nixos-vm-nixops4 = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation = requests: {
input = requests;
output.nixops4 =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources.${nodeName} = {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
nixos.module = mkNixosConfiguration environment requests;
_module.args = { inherit inputs sources; };
inherit (deployment-config) nodeName pathToRoot pathFromRoot;
};
};
};
};
single-nixos-vm-tf = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation = requests: {
input = requests;
output.tf-host = {
nixos-configuration = mkNixosConfiguration environment requests;
ssh = {
username = "root";
host = nodeName;
key-file = null;
};
};
};
};
};
};
options = {
"example-configuration" = mkOption {
type = config.configuration;
default = {
enable = true;
applications.hello.enable = true;
};
};
"ssh-deployment" =
let
env = config.environments."single-nixos-vm-ssh";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment config."example-configuration";
};
"nixops4-deployment" =
let
env = config.environments."single-nixos-vm-nixops4";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment config."example-configuration";
};
"tf-deployment" =
let
env = config.environments."single-nixos-vm-tf";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment config."example-configuration";
};
};
}
);
in
fediversity
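For orientation, the checks import this module with their own `config` and pick one of the exported deployment options; a minimal sketch of such a call site, with illustrative paths and `nodeName` (the SSH check passes its values from `constants.nix`):
let
  system = "x86_64-linux";
  model = import ./deployment/check/common/data-model.nix {
    inherit system;
    # pathToRoot/pathFromRoot/nodeName are illustrative; `inputs` and `sources` fall back to their defaults
    config = {
      pathToRoot = ./.;
      pathFromRoot = ./deployment/check/data-model-ssh;
      nodeName = "ssh";
    };
  };
in
# NixOS configuration and SSH connection info of the single-host SSH deployment
{ inherit (model."ssh-deployment".ssh-host) nixos-configuration ssh; }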

View file

@ -59,6 +59,7 @@ in
inputs.nixpkgs inputs.nixpkgs
sources.flake-parts sources.flake-parts
sources.nixpkgs
sources.flake-inputs sources.flake-inputs
sources.git-hooks sources.git-hooks

View file

@ -48,7 +48,8 @@ in
extraTestScript = mkOption { }; extraTestScript = mkOption { };
sourceFileset = mkOption { sourceFileset = mkOption {
## REVIEW: Upstream to nixpkgs? ## FIXME: grab `lib.types.fileset` from NixOS, once upstreaming PR
## https://github.com/NixOS/nixpkgs/pull/428293 lands.
type = types.mkOptionType { type = types.mkOptionType {
name = "fileset"; name = "fileset";
description = "fileset"; description = "fileset";
@ -75,8 +76,6 @@ in
./sharedOptions.nix ./sharedOptions.nix
./targetNode.nix ./targetNode.nix
./targetResource.nix ./targetResource.nix
(config.pathToCwd + "/flake-under-test.nix")
]; ];
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress; acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
@ -163,31 +162,38 @@ in
deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub") deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
'')} '')}
## NOTE: This is super slow. It could probably be optimised in Nix, for ${
## instance by allowing to grab things directly from the host's store. if config.useFlake then
## ''
## NOTE: We use the repository as-is (cf `src` above), overriding only ## NOTE: This is super slow. It could probably be optimised in Nix, for
## `flake.nix` by our `flake-under-test.nix`. We also override the flake ## instance by allowing to grab things directly from the host's store.
## lock file to use locally available inputs, as we cannot download them. ##
## ## NOTE: We use the repository as-is (cf `src` above), overriding only
with subtest("Override the flake and its lock"): ## `flake.nix` by our `flake-under-test.nix`. We also override the flake
deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix") ## lock file to use locally available inputs, as we cannot download them.
deployer.succeed(""" ##
nix flake lock --extra-experimental-features 'flakes nix-command' \ with subtest("Override the flake and its lock"):
--offline -v \ deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
--override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \ deployer.succeed("""
\ nix flake lock --extra-experimental-features 'flakes nix-command' \
--override-input nixops4-nixos ${inputs.nixops4-nixos} \ --offline -v \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \ --override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \ \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \ --override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \ --override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/nixops4 ${ --override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle --override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
} \ --override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \ --override-input nixops4-nixos/nixops4 ${
; inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
""") } \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
;
""")
''
else
""
}
${optionalString config.enableAcme '' ${optionalString config.enableAcme ''
with subtest("Set up handmade DNS"): with subtest("Set up handmade DNS"):

View file

@ -64,5 +64,7 @@ in
during the test to the correct value. during the test to the correct value.
''; '';
}; };
useFlake = lib.mkEnableOption "Use a flake in the test.";
}; };
} }

View file

@ -28,6 +28,8 @@ in
system.switch.enable = true; system.switch.enable = true;
nix = { nix = {
# short-cut network time-outs
settings.download-attempts = 1;
## Not used; save a large copy operation ## Not used; save a large copy operation
channel.enable = false; channel.enable = false;
registry = lib.mkForce { }; registry = lib.mkForce { };

View file

@ -0,0 +1,9 @@
{
targetMachines = [
"nixops4"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
useFlake = true;
}

View file

@ -0,0 +1,22 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
useFlake
;
}

View file

@ -0,0 +1,29 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, ... }:
let
system = "x86_64-linux";
in
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-model =
(import ./deployment/check/common/data-model.nix {
inherit system inputs;
config = {
inherit (import ./deployment/check/data-model-nixops4/constants.nix) pathToRoot pathFromRoot;
nodeName = "nixops4";
};
})."nixops4-deployment".nixops4;
}
);
}

View file

@ -0,0 +1,52 @@
{
lib,
config,
inputs,
...
}:
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
../../function.nix
../common/data-model.nix
../common/data-model-options.nix
./constants.nix
(config.pathToCwd + "/flake-under-test.nix")
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
inputs.nixops4.packages.${system}.default
jq
];
# FIXME: sad times
system.extraDependencies = with pkgs; [
jq
jq.inputDerivation
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
};
extraTestScript = ''
with subtest("nixops4"):
nixops4.fail("hello 1>&2")
deployer.succeed("nixops4 apply check-deployment-model --show-trace --verbose --no-interactive 1>&2")
nixops4.succeed("su - operator -c hello 1>&2")
'';
}

View file

@ -0,0 +1,8 @@
{
targetMachines = [
"ssh"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@ -0,0 +1,21 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@ -0,0 +1,110 @@
{
lib,
config,
pkgs,
inputs,
...
}:
let
inherit (import ./constants.nix) pathToRoot pathFromRoot;
inherit (pkgs) system;
escapedJson = v: lib.replaceStrings [ "\"" ] [ "\\\\\"" ] (lib.strings.toJSON v);
deployment-config = {
inherit pathToRoot pathFromRoot;
inherit (config) enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
nodeName = "ssh";
};
inherit
((import ../common/data-model.nix {
inherit system inputs;
config = deployment-config;
})."ssh-deployment".ssh-host.ssh
)
host
username
key-file
;
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
../../function.nix
../common/data-model.nix
../common/data-model-options.nix
./constants.nix
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
jq
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
};
extraTestScript = ''
with subtest("ssh: Check the status before deployment"):
ssh.fail("hello 1>&2")
with subtest("ssh: Run the deployment"):
deployer.succeed("""
set -euo pipefail
# INSTANTIATE
command=(nix-instantiate --show-trace --expr '
let
system = "${pkgs.system}"; # FIXME: what system are we deploying to?
in
import ${pathToRoot}/deployment/nixos.nix {
inherit system;
configuration = (
import ${pathToRoot}/deployment/check/common/data-model.nix {
inherit system;
config = builtins.fromJSON "${escapedJson deployment-config}";
}
)."ssh-deployment".ssh-host.nixos-configuration;
}
')
# DEPLOY
host="${lib.defaultTo "root" username}@${host}"
sshOpts=(
${if key-file == null then "" else "-i ${key-file}"}
-o StrictHostKeyChecking=no
-o "ConnectTimeout=1"
-o "ServerAliveInterval=1"
)
# instantiate the config in /nix/store
"''${command[@]}" --show-trace -A out_path
# get the realized derivation to deploy
outPath=$(nix-store --realize "$("''${command[@]}" --show-trace --eval --strict --json | jq -r '.drv_path')")
# deploy the config by nix-copy-closure
NIX_SSHOPTS="''${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
# switch the remote host to the config
output=$(ssh "''${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; nohup $outPath/bin/switch-to-configuration switch &" 2>&1) || echo "status code: $?"
echo "output: $output"
if [[ $output != *"Timeout, server ssh not responding"* ]]; then
echo "non-timeout error: $output"
exit 1
else
exit 0
fi
""")
ssh.wait_for_unit("multi-user.target")
ssh.succeed("su - operator -c hello 1>&2")
'';
}

View file

@ -0,0 +1,8 @@
{
targetMachines = [
"target"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
enableAcme = true;
}

View file

@ -0,0 +1,21 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@ -0,0 +1,35 @@
#! /usr/bin/env bash
set -xeuo pipefail
declare username host system config_nix config_tf
# INSTANTIATE
command=(nix-instantiate --argstr system "$system" --argstr config_nix "$config_nix" --argstr config_tf "$config_tf" ./nixos.nix)
# instantiate the config in /nix/store
"${command[@]}" -A out_path
# DEPLOY
sshOpts=(
-o BatchMode=yes
-o StrictHostKeyChecking=no
# TODO set key for production
# ${if key-file == null then "" else "-i ${key-file}"}
# NOTE: the options below are for tests
-o ConnectTimeout=1
-o ServerAliveInterval=1
)
destination="$username@$host"
# get the realized derivation to deploy
outPath=$(nix-store --realize "$("${command[@]}" --show-trace --eval --strict --json | jq -r '.drv_path')")
# deploy the config by nix-copy-closure
NIX_SSHOPTS="${sshOpts[*]}" nix-copy-closure --to "$destination" "$outPath" --gzip --use-substitutes
# switch the remote host to the config
# NOTE: the checks here are for tests - in production, a time-out could indicate a real problem rather than success!
# shellcheck disable=SC2029
output=$(ssh "${sshOpts[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; nohup $outPath/bin/switch-to-configuration switch &" 2>&1) || echo "status code: $?"
echo "output: $output"
if [[ $output != *"Timeout, server $host not responding"* ]]; then
echo "non-timeout error: $output"
exit 1
else
exit 0
fi

View file

@ -0,0 +1,44 @@
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
# TF resource to build and deploy NixOS instances.
resource "terraform_data" "nixos" {
  # trigger a rebuild/deploy if (FIXME?) any potentially used config/code changed,
  # preventing these steps (20+s, the build being the bottleneck) when nothing changed.
  # terraform-nixos separates the two so it only deploys if the instantiation changed,
  # yet it still builds even then - which may matter less when deploying on the remote.
  # having build and deploy in one resource reflects preferring to prevent no-op rebuilds
  # over preventing (with fewer false positives) no-op deployments,
  # as I could not find a way to prevent no-op rebuilds without merging them:
  # - generic resources cannot have outputs, while we want info from the instantiation (unless built on the host?).
  # - `data` always runs, which is slow for deploy and especially for build.
triggers_replace = [
data.external.hash.result,
var.host,
var.config_nix,
var.config_tf,
]
provisioner "local-exec" {
    # Directory to run the script from. We use the TF project root dir,
    # expressed as a path relative to where TF is run from,
    # matching calling modules' expectations about config_nix locations.
    # Note that absolute paths can cause false positives in triggers,
    # so they are generally discouraged in TF.
working_dir = path.root
environment = {
system = var.system
username = var.username
host = var.host
config_nix = var.config_nix
config_tf = replace(jsonencode(var.config_tf), "\"", "\\\"")
}
# TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
# TODO: build on target?
command = "sh deploy.sh"
}
}

View file

@ -0,0 +1,13 @@
{
system,
config_nix,
config_tf,
}:
import ../../nixos.nix {
inherit system;
configuration =
(import ../common/data-model.nix {
inherit system;
config = config_nix // builtins.fromJSON config_tf;
})."tf-deployment".tf-host.nixos-configuration;
}

View file

@ -0,0 +1,91 @@
{
lib,
config,
pkgs,
inputs,
...
}:
let
inherit (import ./constants.nix) pathToRoot pathFromRoot;
inherit (pkgs) system;
# escapedJson = v: lib.replaceStrings [ "\"" ] [ "\\\\\"" ] (lib.strings.toJSON v);
deployment-config = {
inherit pathToRoot pathFromRoot;
inherit (config) enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
nodeName = "target";
};
inherit
((import ../common/data-model.nix {
inherit system inputs;
config = deployment-config;
})."tf-deployment".tf-host.ssh
)
host
username
# key-file
;
tf-vars = {
inherit host username system;
config_nix = lib.strings.toJSON deployment-config;
# config_nix = escapedJson deployment-config;
# config_tf = ;
};
tf-env = pkgs.callPackage ./tf-env.nix { };
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
../../function.nix
../common/data-model.nix
../common/data-model-options.nix
./constants.nix
./main.tf
./variables.tf
./deploy.sh
];
nodes.deployer =
{ pkgs, ... }:
{
# nixpkgs.config.allowUnfree = lib.mkForce true;
environment.systemPackages = with pkgs; [
(pkgs.callPackage ./tf.nix { })
jq
];
# needed only when building from deployer
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
};
extraTestScript = ''
with subtest("ssh: Check the status before deployment"):
target.fail("hello 1>&2")
with subtest("ssh: Run the deployment"):
deployer.succeed("""
set -xeuo pipefail
${lib.concatStringsSep "\n" (lib.mapAttrsToList (k: v: ''export TF_VAR_${k}='${v}';'') tf-vars)}
export TF_LOG=info
cd "${tf-env}/deployment/check/data-model-tf"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -lock=false -parallelism=1
""")
target.wait_for_unit("multi-user.target")
target.succeed("su - operator -c hello 1>&2")
'';
}

View file

@ -0,0 +1,19 @@
{
pkgs,
lib,
sources,
}:
pkgs.writeScriptBin "setup" ''
# calculated pins
echo '${lib.strings.toJSON sources}' > ./.npins.json
# generate TF lock for nix's TF providers
for category in deployment/check/data-model-tf; do
pushd "$category"
rm -rf .terraform/
rm -f .terraform.lock.hcl
# suppress warning on architecture-specific generated lock file:
# `Warning: Incomplete lock file information for providers`.
tofu init -input=false 1>/dev/null
popd
done
''

View file

@ -0,0 +1,31 @@
{
lib,
pkgs,
sources ? import ../../../npins,
}:
pkgs.stdenv.mkDerivation {
name = "tf-repo";
src =
with lib.fileset;
toSource {
root = ../../../.;
# don't copy ignored files
fileset = intersection (gitTracked ../../../.) ../../../.;
};
buildInputs = [
(pkgs.callPackage ./tf.nix { })
(pkgs.callPackage ./setup.nix { inherit sources; })
];
buildPhase = ''
runHook preBuild
pushd deployment/check/data-model-tf
setup
popd
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -r . $out
runHook postInstall
'';
}

View file

@ -0,0 +1,11 @@
# FIXME: use overlays so this gets imported just once?
{
pkgs,
...
}:
let
tf = pkgs.opentofu;
in
tf.withPlugins (p: [
p.external
])
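The FIXME above could be addressed with an overlay so the plugin-wrapped tofu is defined once and reused; a possible sketch (the attribute name `opentofu-with-plugins` is hypothetical, not part of this PR):
# hypothetical overlay; importers would then refer to pkgs.opentofu-with-plugins
# instead of calling `pkgs.callPackage ./tf.nix { }` in each place
final: prev: {
  opentofu-with-plugins = prev.opentofu.withPlugins (p: [ p.external ]);
}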

View file

@ -0,0 +1,23 @@
variable "system" {
type = string
default = "x86_64-linux"
}
variable "username" {
type = string
default = "root"
}
variable "host" {
type = string
}
variable "config_nix" {
type = string
default = "{}"
}
variable "config_tf" {
type = map(any)
default = {}
}

View file

@ -8,4 +8,5 @@
pathToRoot = ../../..; pathToRoot = ../../..;
pathFromRoot = ./.; pathFromRoot = ./.;
enableAcme = true; enableAcme = true;
useFlake = true;
} }

View file

@ -15,5 +15,6 @@ runNixOSTest {
pathToRoot pathToRoot
pathFromRoot pathFromRoot
enableAcme enableAcme
useFlake
; ;
} }

View file

@ -128,6 +128,7 @@ in
sourceFileset = lib.fileset.unions [ sourceFileset = lib.fileset.unions [
./constants.nix ./constants.nix
./deployment.nix ./deployment.nix
(config.pathToCwd + "/flake-under-test.nix")
# REVIEW: I would like to be able to grab all of `/deployment` minus # REVIEW: I would like to be able to grab all of `/deployment` minus
# `/deployment/check`, but I can't because there is a bunch of other files # `/deployment/check`, but I can't because there is a bunch of other files

View file

@ -1,29 +1,94 @@
let let
inherit (import ../default.nix { }) pkgs inputs; inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib; inherit (pkgs) lib;
inherit (lib) mkOption; inherit (lib) mkOption types;
eval = eval =
module: module:
(lib.evalModules { (lib.evalModules {
specialArgs = { specialArgs = {
inherit inputs; inherit pkgs inputs;
}; };
modules = [ modules = [
module module
./data-model.nix ./data-model.nix
]; ];
}).config; }).config;
inherit (inputs.nixops4.lib) mkDeployment;
in in
{ {
_class = "nix-unit"; _class = "nix-unit";
test-eval = { test-eval = {
/**
This tests a very simple arrangement that features all the ingredients of the Fediversity business logic
(application, resource, environment, deployment) and wires them up in one end-to-end exercise.
- The dummy resource is a login shell made available to some user.
- The dummy application is `hello`, which requires a shell in order to be deployed.
- The dummy environment is a single NixOS VM that hosts one login shell, for the operator.
- The dummy configuration enables the `hello` application.
This will produce a NixOps4 deployment for a NixOS VM with a login shell for the operator and `hello` available.
*/
expr = expr =
let let
fediversity = eval ( fediversity = eval (
{ config, ... }: { config, ... }:
{ {
config = { config = {
resources.login-shell = {
description = "The operator needs to be able to log into the shell";
request =
{ ... }:
{
_class = "fediversity-resource-request";
options = {
wheel = mkOption {
description = "Whether the login user needs root permissions";
type = types.bool;
default = false;
};
packages = mkOption {
description = "Packages that need to be available in the user environment";
type = with types; attrsOf package;
};
};
};
policy =
{ config, ... }:
{
_class = "fediversity-resource-policy";
options = {
username = mkOption {
description = "Username for the operator";
type = types.str; # TODO: use the proper constraints from NixOS
};
wheel = mkOption {
description = "Whether to allow login with root permissions";
type = types.bool;
default = false;
};
};
config = {
resource-type = types.raw; # TODO: splice out the user type from NixOS
apply =
requests:
let
# Filter out requests that need wheel if policy doesn't allow it
validRequests = lib.filterAttrs (
_name: req: !req.login-shell.wheel || config.wheel
) requests.resources;
in
lib.optionalAttrs (validRequests != { }) {
${config.username} = {
isNormalUser = true;
packages =
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
};
};
};
};
};
applications.hello = applications.hello =
{ ... }: { ... }:
{ {
@ -31,19 +96,50 @@ in
module = module =
{ ... }: { ... }:
{ {
options = { options.enable = lib.mkEnableOption "Hello in the shell";
enable = lib.mkEnableOption "Hello in the shell"; };
implementation = cfg: {
input = cfg;
output.resources = lib.optionalAttrs cfg.enable {
hello.login-shell.packages.hello = pkgs.hello;
};
};
};
environments.single-nixos-vm =
{ config, ... }:
{
resources."operator-environment".login-shell.username = "operator";
implementation = requests: {
input = requests;
output.nixops4 =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources.the-machine = {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
];
nixos.module =
{ ... }:
{
users.users = config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
};
}; };
}; };
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
}; };
}; };
options = { options = {
example-configuration = mkOption { "example-configuration" = mkOption {
type = config.configuration; type = config.configuration;
readOnly = true; readOnly = true;
default = { default = {
@ -51,20 +147,66 @@ in
applications.hello.enable = true; applications.hello.enable = true;
}; };
}; };
"example-deployment" = mkOption {
type = config.environments.single-nixos-vm.resource-mapping.output-type;
readOnly = true;
default = config.environments.single-nixos-vm.deployment config."example-configuration";
};
}; };
} }
); );
resources =
fediversity.applications.hello.resources
fediversity."example-configuration".applications.hello;
hello-shell = resources.resources.hello.login-shell;
environment = fediversity.environments.single-nixos-vm.resources."operator-environment".login-shell;
result = mkDeployment {
modules = [
(fediversity.environments.single-nixos-vm.deployment fediversity."example-configuration")
];
};
in in
{ {
inherit (fediversity) number-of-resources = with lib; length (attrNames fediversity.resources);
example-configuration inherit (fediversity) example-configuration;
; hello-package-exists = hello-shell.packages ? hello;
wheel-required = hello-shell.wheel;
wheel-allowed = environment.wheel;
operator-shell =
let
operator = (environment.apply resources).operator;
in
{
inherit (operator) isNormalUser;
packages = map (p: "${p.pname}") operator.packages;
extraGroups = operator.extraGroups;
};
deployment = {
inherit (result) _type;
deploymentFunction = lib.isFunction result.deploymentFunction;
getProviders = lib.isFunction result.getProviders;
};
}; };
expected = { expected = {
number-of-resources = 1;
example-configuration = { example-configuration = {
enable = true; enable = true;
applications.hello.enable = true; applications.hello.enable = true;
}; };
hello-package-exists = true;
wheel-required = false;
wheel-allowed = false;
operator-shell = {
isNormalUser = true;
packages = [ "hello" ];
extraGroups = [ ];
};
deployment = {
_type = "nixops4Deployment";
deploymentFunction = true;
getProviders = true;
};
}; };
}; };
} }

View file

@ -1,33 +1,140 @@
{ {
lib, lib,
config, config,
inputs,
pkgs,
... ...
}: }:
let let
inherit (lib) mkOption types; inherit (lib) mkOption types;
inherit (lib.types) inherit (lib.types)
attrsOf
attrTag attrTag
attrsOf
deferredModuleWith deferredModuleWith
submodule
optionType
functionTo functionTo
nullOr
optionType
raw
str
submodule
; ;
functionType = import ./function.nix; functionType = import ./function.nix;
application-resources = { application-resources = submodule {
options.resources = mkOption { options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead # TODO: maybe transpose, and group the resources by type instead
type = attrsOf ( type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources) attrTag (
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.request; }) config.resources
)
); );
}; };
}; };
nixops4Deployment = types.deferredModuleWith {
staticModules = [
inputs.nixops4.modules.nixops4Deployment.default
{
_class = "nixops4Deployment";
_module.args = {
resourceProviderSystem = pkgs.system;
resources = { };
};
}
];
};
nixos-configuration = mkOption {
description = "A NixOS configuration.";
type = raw;
};
host-ssh = mkOption {
description = "SSH connection info to connect to a single host.";
type = submodule {
options = {
host = mkOption {
description = "the host to access by SSH";
type = str;
};
username = mkOption {
description = "the SSH user to use";
type = nullOr str;
default = null;
};
key-file = mkOption {
description = "path to the user's SSH private key";
type = nullOr str;
example = "/root/.ssh/id_ed25519";
};
};
};
};
deployment-type = attrTag {
ssh-host = mkOption {
description = "A deployment by SSH to update a single existing NixOS host.";
type = submodule {
options = {
inherit nixos-configuration;
ssh = host-ssh;
};
};
};
nixops4 = mkOption {
description = "A NixOps4 NixOS deployment. For an example, see https://github.com/nixops4/nixops4-nixos/blob/main/example/deployment.nix.";
type = nixops4Deployment;
};
tf-host = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
type = submodule {
options = {
inherit nixos-configuration;
ssh = host-ssh;
};
};
};
};
in in
{ {
_class = "nixops4Deployment";
options = { options = {
resources = mkOption {
description = "Collection of deployment resources that can be required by applications and policed by hosting providers";
type = attrsOf (
submodule (
{ ... }:
{
_class = "fediversity-resource";
options = {
description = mkOption {
description = "Description of the resource to help application module authors and hosting providers to work with it";
type = types.str;
};
request = mkOption {
description = "Options for declaring resource requirements by an application, a description of how the resource is consumed or accessed";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-resource-request"; } ]; };
};
policy = mkOption {
description = "Options for configuring the resource policy for the hosting provider, a description of how the resource is made available";
type = deferredModuleWith {
staticModules = [
(policy: {
_class = "fediversity-resource-policy";
options.resource-type = mkOption {
description = "The type of resource this policy configures";
type = types.optionType;
};
# TODO(@fricklerhandwerk): we may want to make the function type explicit here: `application-resources -> resource-type`
options.apply = mkOption {
description = "Apply the policy to a request";
type = functionTo policy.config.resource-type;
};
})
];
};
};
};
}
)
);
};
applications = mkOption { applications = mkOption {
description = "Collection of Fediversity applications"; description = "Collection of Fediversity applications";
type = attrsOf ( type = attrsOf (
@ -52,12 +159,13 @@ in
readOnly = true; readOnly = true;
default = input: (application.config.implementation input).output; default = input: (application.config.implementation input).output;
}; };
# TODO(@fricklerhandwerk): this needs a better name, it's just the type
config-mapping = mkOption { config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources"; description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType; type = submodule functionType;
readOnly = true; readOnly = true;
default = { default = {
input-type = application.config.module; input-type = submodule application.config.module;
output-type = application-resources; output-type = application-resources;
}; };
}; };
@ -65,6 +173,60 @@ in
}) })
); );
}; };
environments = mkOption {
description = "Run-time environments for Fediversity applications to be deployed to";
type = attrsOf (
submodule (environment: {
_class = "fediversity-environment";
options = {
resources = mkOption {
description = ''
Resources made available by the hosting provider, and their policies.
Setting this is optional, but provides a place to declare that information for programmatic use in the resource mapping.
'';
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.policy; }) config.resources
)
);
};
implementation = mkOption {
description = "Mapping of resources required by applications to available resources; the result can be deployed";
type = environment.config.resource-mapping.function-type;
};
resource-mapping = mkOption {
description = "Function type for the mapping from resources to a deployment";
type = submodule functionType;
readOnly = true;
default = {
input-type = attrsOf application-resources;
output-type = deployment-type;
};
};
# TODO(@fricklerhandwerk): maybe this should be a separate thing such as `fediversity-setup`,
# which makes explicit which applications and environments are available.
# then the deployments can simply be the result of the function application baked into this module.
deployment = mkOption {
description = "Generate a deployment from a configuration, by applying an environment's resource policies to the applications' resource mappings";
type = functionTo (environment.config.resource-mapping.output-type);
readOnly = true;
default =
cfg:
# TODO: check cfg.enable.true
let
required-resources = lib.mapAttrs (
name: application-settings: config.applications.${name}.resources application-settings
) cfg.applications;
in
(environment.config.implementation required-resources).output;
};
};
})
);
};
configuration = mkOption { configuration = mkOption {
description = "Configuration type declaring options to be set by operators"; description = "Configuration type declaring options to be set by operators";
type = optionType; type = optionType;

View file

@ -21,6 +21,21 @@
inherit (pkgs.testers) runNixOSTest; inherit (pkgs.testers) runNixOSTest;
inherit inputs sources; inherit inputs sources;
}; };
deployment-model-ssh = import ./check/data-model-ssh {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-model-nixops4 = import ./check/data-model-nixops4 {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-model-tf = import ./check/data-model-tf {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
}; };
}; };
} }

View file

@ -5,7 +5,6 @@
let let
inherit (lib) mkOption types; inherit (lib) mkOption types;
inherit (types) inherit (types)
deferredModule
submodule submodule
functionTo functionTo
optionType optionType
@ -14,10 +13,10 @@ in
{ {
options = { options = {
input-type = mkOption { input-type = mkOption {
type = deferredModule; type = optionType;
}; };
output-type = mkOption { output-type = mkOption {
type = deferredModule; type = optionType;
}; };
function-type = mkOption { function-type = mkOption {
type = optionType; type = optionType;
@ -25,10 +24,10 @@ in
default = functionTo (submodule { default = functionTo (submodule {
options = { options = {
input = mkOption { input = mkOption {
type = submodule config.input-type; type = config.input-type;
}; };
output = mkOption { output = mkOption {
type = submodule config.output-type; type = config.output-type;
}; };
}; };
}); });

deployment/nixos.nix (new file, +23 lines)
View file

@ -0,0 +1,23 @@
{
configuration,
system,
sources ? import ../npins,
}:
let
eval = import "${sources.nixpkgs}/nixos/lib/eval-config.nix" {
inherit system;
specialArgs = {
inherit sources;
};
modules = [ configuration ];
};
os = {
inherit (eval) pkgs config options;
system = eval.config.system.build.toplevel;
inherit (eval.config.system.build) vm vmWithBootLoader;
};
in
{
drv_path = os.config.system.build.toplevel.drvPath;
out_path = os.config.system.build.toplevel;
}
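Both `deploy.sh` and the SSH check consume this wrapper via `nix-instantiate`: they realise `drv_path` with `nix-store --realize` and copy the resulting `out_path` to the target host. A minimal Nix-side sketch, where `configuration` stands for any complete NixOS module (in the checks, the data model's generated `nixos-configuration`):
{ configuration }:
let
  os = import ./deployment/nixos.nix {
    inherit configuration;
    system = "x86_64-linux";
  };
in
{
  # deploy.sh realises drv_path, then switches the remote system profile to out_path
  inherit (os) drv_path out_path;
}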

View file

@ -6,7 +6,7 @@
_class = "nixos"; _class = "nixos";
users.users = { users.users = {
root.openssh.authorizedKeys.keys = config.user.users.procolix.openssh.authorizedKeys.keys; root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = { procolix = {
isNormalUser = true; isNormalUser = true;

View file

@ -20,16 +20,13 @@ in
''; '';
}; };
proxmox = mkOption { isFediversityVm = mkOption {
type = types.nullOr ( type = types.bool;
types.enum [
"procolix"
"fediversity"
]
);
description = '' description = ''
The Proxmox instance. This is used for provisioning only and should be Whether the machine is a Fediversity VM or not. This is used to
set to `null` if the machine is not a VM. determine whether the machine should be provisioned via Proxmox or not.
Machines that are _not_ Fediversity VMs could be physical machines, or
VMs that live outside Fediversity, e.g. on Procolix's Proxmox.
''; '';
}; };

View file

@ -1,10 +1,14 @@
{ sources, ... }: { ... }:
{ {
_class = "nixos"; _class = "nixos";
imports = [ ## FIXME: It would be nice, but the following leads to infinite recursion
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix" ## in the way we currently plug `sources` in.
]; ##
# imports = [
# "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# ];
boot = { boot = {
initrd = { initrd = {

View file

@ -2,7 +2,6 @@
inputs, inputs,
lib, lib,
config, config,
sources,
keys, keys,
secrets, secrets,
... ...
@ -33,10 +32,9 @@ in
## should go into the `./nixos` subdirectory. ## should go into the `./nixos` subdirectory.
nixos.module = { nixos.module = {
imports = [ imports = [
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
./options.nix ./options.nix
./nixos ./nixos
./proxmox-qemu-vm.nix
]; ];
## Inject the shared options from the resource's `config` into the NixOS ## Inject the shared options from the resource's `config` into the NixOS

View file

@ -14,88 +14,55 @@ let
mkOption mkOption
evalModules evalModules
filterAttrs filterAttrs
mapAttrs'
deepSeq
; ;
inherit (lib.attrsets) genAttrs; inherit (lib.attrsets) genAttrs;
## Given a machine's name and whether it is a test VM, make a resource module, commonResourceModule = {
## except for its missing provider. (Depending on the use of that resource, we # TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
## will provide a different one.) # flake-parts and have our own data model for how the project is organised
makeResourceModule = # internally
{ vmName, isTestVm }: _module.args = {
{ inherit
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch flake-parts and have our own data model for how the project is organised internally inputs
_module.args = { keys
inherit secrets
inputs sources
keys ;
secrets
;
};
nixos.module.imports = [
./common/proxmox-qemu-vm.nix
];
nixos.specialArgs = {
inherit sources;
};
imports =
[
./common/resource.nix
]
++ (
if isTestVm then
[
../machines/operator/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
# allow our panel vm access to the test machines
keys.panel
];
}
]
else
[
../machines/dev/${vmName}
]
);
fediversityVm.name = vmName;
}; };
## FIXME: It would be preferable to have those `sources`-related imports in
## the modules that use them. However, doing so triggers infinite recursion
## because of the way we propagate `sources`: `sources` must be propagated by
## means of `specialArgs`, but that requires a bigger change.
nixos.module.imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
"${sources.home-manager}/nixos"
];
imports = [
./common/resource.nix
];
};
## Given a list of machine names, make a deployment with those machines' ## Given a list of machine names, make a deployment with those machines'
## configurations as resources. ## configurations as resources.
makeDeployment = makeDeployment =
vmNames: vmNames:
{ providers, ... }: { providers, ... }:
{ {
# XXX: this type merge is for adding `specialArgs` to resource modules providers.local = inputs.nixops4.modules.nixops4Provider.local;
options.resources = mkOption { resources = genAttrs vmNames (vmName: {
type = type = providers.local.exec;
with lib.types; imports = [
lazyAttrsOf (submoduleWith { inputs.nixops4-nixos.modules.nixops4Resource.nixos
class = "nixops4Resource"; commonResourceModule
modules = [ ]; ../machines/dev/${vmName}
# TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs` ];
# once we're sure it's sane. leaving it here for better control during refactoring. });
specialArgs = {
inherit sources;
};
});
};
config = {
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
}; };
makeDeployment' = vmName: makeDeployment [ vmName ]; makeDeployment' = vmName: makeDeployment [ vmName ];
@ -110,21 +77,29 @@ let
fediversity = import ../services/fediversity; fediversity = import ../services/fediversity;
} }
{ {
garageConfigurationResource = makeResourceModule { garageConfigurationResource = {
vmName = "test01"; imports = [
isTestVm = true; commonResourceModule
../machines/operator/test01
];
}; };
mastodonConfigurationResource = makeResourceModule { mastodonConfigurationResource = {
vmName = "test06"; # somehow `test02` has a problem - use test06 instead imports = [
isTestVm = true; commonResourceModule
../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
];
}; };
peertubeConfigurationResource = makeResourceModule { peertubeConfigurationResource = {
vmName = "test05"; imports = [
isTestVm = true; commonResourceModule
../machines/operator/test05
];
}; };
pixelfedConfigurationResource = makeResourceModule { pixelfedConfigurationResource = {
vmName = "test04"; imports = [
isTestVm = true; commonResourceModule
../machines/operator/test04
];
}; };
}; };
@ -137,54 +112,63 @@ let
## this is only needed to expose NixOS configurations for provisioning ## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4. ## purposes, and eventually all of this should be handled by NixOps4.
options = { options = {
nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { }; nixpkgs = mkOption { };
ssh = mkOption { }; ssh = mkOption { };
}; };
}; };
makeResourceConfig = makeResourceConfig =
vm: { vmName, isTestVm }:
(evalModules { (evalModules {
modules = [ modules = [
nixops4ResourceNixosMockOptions nixops4ResourceNixosMockOptions
(makeResourceModule vm) commonResourceModule
(if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
]; ];
}).config; }).config;
## Given a VM name, make a NixOS configuration for this machine. ## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration = makeConfiguration =
isTestVm: vmName: isTestVm: vmName:
let import "${sources.nixpkgs}/nixos" {
inherit (sources) nixpkgs; configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
in system = "x86_64-linux";
import "${nixpkgs}/nixos" {
modules = [
(makeResourceConfig { inherit vmName isTestVm; }).nixos.module
];
}; };
makeVmOptions = isTestVm: vmName: { makeVmOptions =
inherit ((makeResourceConfig { inherit vmName isTestVm; }).fediversityVm) isTestVm: vmName:
proxmox let
vmId config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
description in
if config.isFediversityVm then
sockets {
cores inherit (config)
memory vmId
diskSize description
sockets
hostPublicKey cores
unsafeHostPrivateKey memory
; diskSize
}; hostPublicKey
unsafeHostPrivateKey
;
}
else
null;
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path)); listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev; machines = listSubdirectories ../machines/dev;
testMachines = listSubdirectories ../machines/operator; testMachines = listSubdirectories ../machines/operator;
nixosConfigurations =
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true);
vmOptions =
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
(genAttrs machines (makeVmOptions false) // genAttrs testMachines (makeVmOptions true));
in in
{ {
_class = "flake"; _class = "flake";
@ -208,10 +192,23 @@ in
) )
); );
}; };
flake.nixosConfigurations = flake = { inherit nixosConfigurations vmOptions; };
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true); perSystem =
flake.vmOptions = { pkgs, ... }:
genAttrs machines (makeVmOptions false) {
// genAttrs testMachines (makeVmOptions true); checks =
mapAttrs' (name: nixosConfiguration: {
name = "nixosConfigurations-${name}";
value = nixosConfiguration.config.system.build.toplevel;
}) nixosConfigurations
// mapAttrs' (name: vmOptions: {
name = "vmOptions-${name}";
## Check that VM options builds/evaluates correctly. `deepSeq e1
## e2` evaluates `e1` strictly in depth before returning `e2`. We
## use this trick because checks need to be derivations, which VM
## options are not.
value = deepSeq vmOptions pkgs.hello;
}) vmOptions;
};
} }

View file

@ -179,15 +179,9 @@ grab_vm_options () {
--log-format raw --quiet --log-format raw --quiet
) )
proxmox=$(echo "$options" | jq -r .proxmox)
vm_id=$(echo "$options" | jq -r .vmId) vm_id=$(echo "$options" | jq -r .vmId)
description=$(echo "$options" | jq -r .description) description=$(echo "$options" | jq -r .description)
if [ "$proxmox" != fediversity ]; then
die "I do not know how to provision things that are not Fediversity VMs,
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
fi
sockets=$(echo "$options" | jq -r .sockets) sockets=$(echo "$options" | jq -r .sockets)
cores=$(echo "$options" | jq -r .cores) cores=$(echo "$options" | jq -r .cores)
memory=$(echo "$options" | jq -r .memory) memory=$(echo "$options" | jq -r .memory)

View file

@ -167,16 +167,10 @@ grab_vm_options () {
--log-format raw --quiet --log-format raw --quiet
) )
proxmox=$(echo "$options" | jq -r .proxmox)
vm_id=$(echo "$options" | jq -r .vmId) vm_id=$(echo "$options" | jq -r .vmId)
if [ "$proxmox" != fediversity ]; then printf 'done grabing VM options for VM %s. Got id: %d.\n' \
die "I do not know how to remove things that are not Fediversity VMs, "$vm_name" "$vm_id"
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
fi
printf 'done grabing VM options for VM %s. Found VM %d on %s Proxmox.\n' \
"$vm_name" "$vm_id" "$proxmox"
fi fi
} }

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "fedi200";
isFediversityVm = true;
vmId = 200; vmId = 200;
proxmox = "fediversity";
description = "Testing machine for Hans"; description = "Testing machine for Hans";
domain = "abundos.eu"; domain = "abundos.eu";
@ -16,10 +17,4 @@
gateway = "2a00:51c0:13:1305::1"; gateway = "2a00:51c0:13:1305::1";
}; };
}; };
nixos.module = {
imports = [
../../../infra/common/proxmox-qemu-vm.nix
];
};
} }

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "fedi201";
isFediversityVm = true;
vmId = 201; vmId = 201;
proxmox = "fediversity";
description = "FediPanel"; description = "FediPanel";
domain = "abundos.eu"; domain = "abundos.eu";
@ -19,7 +20,6 @@
nixos.module = { nixos.module = {
imports = [ imports = [
../../../infra/common/proxmox-qemu-vm.nix
./fedipanel.nix ./fedipanel.nix
]; ];
}; };

View file

@ -1,6 +1,5 @@
{ {
config, config,
sources,
... ...
}: }:
let let
@ -11,7 +10,6 @@ in
imports = [ imports = [
(import ../../../panel { }).module (import ../../../panel { }).module
"${sources.home-manager}/nixos"
]; ];
security.acme = { security.acme = {

View file

@ -20,7 +20,9 @@ in
ssh.host = mkForce "forgejo-ci"; ssh.host = mkForce "forgejo-ci";
fediversityVm = { fediversityVm = {
name = "forgejo-ci";
domain = "procolix.com"; domain = "procolix.com";
isFediversityVm = false;
ipv4 = { ipv4 = {
interface = "enp1s0f0"; interface = "enp1s0f0";

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "vm02116";
isFediversityVm = false;
vmId = 2116; vmId = 2116;
proxmox = "procolix";
description = "Forgejo"; description = "Forgejo";
ipv4.address = "185.206.232.34"; ipv4.address = "185.206.232.34";
@ -14,7 +15,6 @@
{ lib, ... }: { lib, ... }:
{ {
imports = [ imports = [
../../../infra/common/proxmox-qemu-vm.nix
./forgejo.nix ./forgejo.nix
]; ];

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "vm02187";
isFediversityVm = false;
vmId = 2187; vmId = 2187;
proxmox = "procolix";
description = "Wiki"; description = "Wiki";
ipv4.address = "185.206.232.187"; ipv4.address = "185.206.232.187";
@ -14,7 +15,6 @@
{ lib, ... }: { lib, ... }:
{ {
imports = [ imports = [
../../../infra/common/proxmox-qemu-vm.nix
./wiki.nix ./wiki.nix
]; ];

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test01";
isFediversityVm = true;
vmId = 7001; vmId = 7001;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test02";
isFediversityVm = true;
vmId = 7002; vmId = 7002;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test03";
isFediversityVm = true;
vmId = 7003; vmId = 7003;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test04";
isFediversityVm = true;
vmId = 7004; vmId = 7004;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test05";
isFediversityVm = true;
vmId = 7005; vmId = 7005;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test06";
isFediversityVm = true;
vmId = 7006; vmId = 7006;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test11";
isFediversityVm = true;
vmId = 7011; vmId = 7011;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test12";
isFediversityVm = true;
vmId = 7012; vmId = 7012;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test13";
isFediversityVm = true;
vmId = 7013; vmId = 7013;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;

View file

@ -2,8 +2,9 @@
_class = "nixops4Resource"; _class = "nixops4Resource";
fediversityVm = { fediversityVm = {
name = "test14";
isFediversityVm = true;
vmId = 7014; vmId = 7014;
proxmox = "fediversity";
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub; hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key; unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;