factor out TF run.sh (#552)

Reviewed-on: fediversity/fediversity#552
Kiara Grouwstra 2025-10-26 20:46:45 +01:00
parent 7a890ccd44
commit f6e40c9220
Signed by: kiara
SSH key fingerprint: SHA256:COspvLoLJ5WC5rFb9ZDe5urVCkK4LJZOsjfF4duRJFU
14 changed files with 342 additions and 449 deletions
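In short: the three near-identical run.sh wrappers under deployment/run/ are deleted, and their env-var plumbing and tofu invocation move into a single tfApply helper in deployment/utils.nix. The script tfApply generates behaves like this sketch (pieced together from the utils.nix hunk below; the tf_vars array is an illustrative stand-in for the TF_VAR_* and backend variables):

    # before:  env TF_VAR_...=... tf_env=<tf-env> bash ./deployment/run/tf-single-host/run.sh
    # after (generated tf-apply.sh):
    dir="<tf-env>/deployment/run/tf-single-host"
    env "${tf_vars[@]}" tofu -chdir="$dir" apply --auto-approve -parallelism=1 >&2   # parallelism=1: limit OOM risk
    env "${tf_vars[@]}" tofu -chdir="$dir" output -json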

@@ -12,7 +12,7 @@ on:
jobs:
_checks:
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-model-tf-proxmox","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-model-tf-proxmox","deployment-panel","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
runs-on: native
steps:
- run: true
@@ -65,48 +65,6 @@ jobs:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -vL
nixops-deployment-providers-default:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-default -vL
nixops-deployment-providers-fedi200:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-fedi200 -vL
nixops-deployment-providers-fedi201:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-fedi201 -vL
nixops-deployment-providers-forgejo-ci:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-forgejo-ci -vL
nixops-deployment-providers-test:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-test -vL
nixops-deployment-providers-vm02116:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-vm02116 -vL
nixops-deployment-providers-vm02187:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.nixops-deployment-providers-vm02187 -vL
nixosConfigurations-fedi200:
runs-on: native
steps:

@@ -59,8 +59,6 @@ in
_class = "nixosTest";
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-proxmox-template/run.sh
../../run/tf-proxmox-vm/run.sh
../../run/tf-proxmox-vm/await-ssh.sh
];

@@ -42,7 +42,7 @@ in
ssh = {
username = "root";
host = nodeName;
key-file = null;
key-file = "";
inherit sshOpts;
};
caller = "deployment/check/data-model-tf/data-model.nix";

@@ -30,9 +30,6 @@ in
{
_class = "nixosTest";
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-single-host/run.sh
];
nodes.deployer =
{ ... }:
@@ -71,7 +68,7 @@ in
deployer.wait_for_unit("multi-user.target")
deployer.succeed("curl -u basic:fake-secret -X GET http://localhost:8080/state/project1/example")
output = deployer.fail("""
${lib.getExe deploy}
${lib.getExe deploy} 2>&1
""")
assert "Timeout, server ${nodeName} not responding" in output
target.wait_for_unit("multi-user.target")

@@ -19,15 +19,7 @@ let
str
submodule
;
inherit (pkgs.callPackage ./utils.nix { }) toBash;
withPackages = packages: {
makeWrapperArgs = [
"--prefix"
"PATH"
":"
"${lib.makeBinPath packages}"
];
};
inherit (pkgs.callPackage ./utils.nix { }) toBash withPackages tfApply;
writeConfig =
{
system,
@@ -246,6 +238,10 @@ let
key-file
sshOpts
;
in
tfApply {
inherit httpBackend;
directory = "tf-single-host";
environment = {
key_file = key-file;
ssh_opts = sshOpts;
@@ -264,22 +260,7 @@ let
deployment-type = "tf-host";
};
};
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend;
tfPackage = pkgs.callPackage ./run/tf-single-host/tf.nix { };
tfDirs = [ "deployment/run/tf-single-host" ];
};
in
pkgs.writers.writeBashBin "deploy-tf.sh"
(withPackages [
pkgs.jq
(pkgs.callPackage ./run/tf-single-host/tf.nix { })
])
''
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
tf_env=${tf-env} bash ./deployment/run/tf-single-host/run.sh
'';
};
};
};
});
@@ -344,19 +325,10 @@ let
node_name = node-name;
image_datastore_id = imageDatastoreId;
};
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend;
tfPackage = pkgs.callPackage ./run/tf-proxmox-template/tf.nix { };
tfDirs = [
"deployment/run/tf-proxmox-template"
];
};
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-template.sh"
(withPackages [
pkgs.jq
pkgs.qemu
(pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
])
''
set -e
@@ -369,11 +341,13 @@ let
ls -l /tmp/${name}.qcow2 >&2
checksum="$(sha256sum /tmp/${name}.qcow2 | cut -d " " -f1)"
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
env \
TF_VAR_image=/tmp/${name}.qcow2 \
TF_VAR_checksum="$checksum" \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox-template/run.sh
${lib.getExe (tfApply {
inherit httpBackend environment;
directory = "tf-proxmox-template";
})}
'';
};
};
@@ -531,33 +505,12 @@ let
ipv6_gateway = ipv6Gateway;
ipv6_address = ipv6Address;
};
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend;
tfPackage = pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { };
tfDirs = [
"deployment/run/tf-single-host"
"deployment/run/tf-proxmox-vm"
];
};
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-vm.sh"
(withPackages [
pkgs.jq
pkgs.qemu
(pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
])
''
set -e
env ${
toString (
lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") (
lib.filterAttrs (_: v: v != null) environment
)
)
} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox-vm/run.sh
'';
lib.trace (lib.strings.toJSON environment) (tfApply {
inherit httpBackend environment;
directory = "tf-proxmox-vm";
dependentDirs = [ "tf-single-host" ];
});
};
};
});

@@ -1,49 +0,0 @@
{ inputs, sources, ... }:
{
_class = "flake";
perSystem =
{ pkgs, system, ... }:
{
checks = {
proxmox-basic = import ./check/proxmox {
inherit (pkgs.testers) runNixOSTest;
inherit sources system;
};
deployment-basic = import ./check/basic {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-cli = import ./check/cli {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-panel = import ./check/panel {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-ssh = import ./check/data-model-ssh {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-nixops4 = import ./check/data-model-nixops4 {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-tf = import ./check/data-model-tf {
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./check/data-model-tf-proxmox {
inherit inputs sources system;
};
};
};
}

@@ -1,7 +0,0 @@
#! /usr/bin/env bash
set -euo pipefail
declare tf_env
cd "${tf_env}/deployment/run/tf-proxmox-template"
tofu apply --auto-approve -input=false -parallelism=1 >&2
tofu output -json

@@ -1,8 +0,0 @@
#! /usr/bin/env bash
set -euo pipefail
declare tf_env
cd "${tf_env}/deployment/run/tf-proxmox-vm"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -input=false -parallelism=1 >&2
tofu output -json

@@ -1,7 +0,0 @@
#! /usr/bin/env bash
set -euo pipefail
declare tf_env
cd "${tf_env}/deployment/run/tf-single-host"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -parallelism=1

@@ -4,7 +4,7 @@
inputs ? null,
...
}:
{
rec {
evalModel =
module:
(lib.evalModules {
@@ -17,6 +17,7 @@
module
];
}).config;
toBash =
v:
lib.replaceStrings [ "\"" ] [ "\\\"" ] (
@@ -27,4 +28,51 @@
else
lib.strings.toJSON v
);
withPackages = packages: {
makeWrapperArgs = [
"--prefix"
"PATH"
":"
"${lib.makeBinPath packages}"
];
};
tfApply =
{
directory,
httpBackend,
dependentDirs ? [ ],
environment ? { },
# limit OOM risk
parallelism ? 1,
}:
let
env-vars = ''
${
toString (
lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") (
lib.filterAttrs (_: v: v != null) environment
)
)
} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
'';
tfPackage = pkgs.callPackage ./run/${directory}/tf.nix { };
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend tfPackage;
tfDirs = lib.lists.map (dir: "deployment/run/${dir}") ([ directory ] ++ dependentDirs);
};
in
pkgs.writers.writeBashBin "tf-apply.sh"
(withPackages [
tfPackage
pkgs.jq
])
''
set -e
dir="${tf-env}/deployment/run/${directory}"
env ${env-vars} tofu -chdir="$dir" apply --auto-approve -parallelism=${builtins.toString parallelism} >&2
env ${env-vars} tofu -chdir="$dir" output -json
'';
}
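A call site now reduces to the following shape (a minimal sketch; the attribute names come from the tfApply definition above, while the httpBackend binding and the environment value shown are illustrative stand-ins):

    deploy-vm = tfApply {
      inherit httpBackend;                   # backend settings, exported to the process verbatim
      directory = "tf-proxmox-vm";           # runs in <tf-env>/deployment/run/tf-proxmox-vm
      dependentDirs = [ "tf-single-host" ];  # extra directories copied into the tf-env
      environment = { node_name = "pve"; };  # exported as TF_VAR_node_name; nulls are filtered out
    };
    # `lib.getExe deploy-vm` then yields a script that applies and prints `tofu output -json`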

flake.nix
@@ -6,53 +6,287 @@
outputs =
inputs:
let
sources = import ./npins;
inherit (sources) nixpkgs;
architectures = [
"x86_64-linux"
];
lib = import "${nixpkgs}/lib";
forSystem = lib.genAttrs architectures;
overlays = [ ];
pkgsFor = forSystem (system: import nixpkgs { inherit system overlays; });
forPkgs =
f:
forSystem (
system:
f {
inherit system;
pkgs = pkgsFor.${system};
}
);
keys = import ./keys;
secrets = import ./secrets;
inherit (builtins) readDir readFile fromJSON;
inherit (lib)
attrNames
mkOption
evalModules
filterAttrs
mapAttrs'
deepSeq
;
inherit (lib.attrsets) genAttrs;
commonResourceModule = {
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
# flake-parts and have our own data model for how the project is organised
# internally
_module.args = {
inherit
inputs
keys
secrets
sources
;
};
## FIXME: It would be preferable to have those `sources`-related imports in
## the modules that use them. However, doing so triggers infinite recursions
## because of the way we propagate `sources`. `sources` must be propagated by
## means of `specialArgs`, but this requires a bigger change.
nixos.module.imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
"${sources.home-manager}/nixos"
];
imports = [
./infra/common/resource.nix
];
};
## Given a list of machine names, make a deployment with those machines'
## configurations as resources.
makeDeployment =
vmNames:
{ providers, ... }:
{
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
commonResourceModule
./machines/dev/${vmName}
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];
## Given an attrset of test configurations (key = test machine name, value =
## NixOS configuration module), make a deployment with those machines'
## configurations as resources.
makeTestDeployment =
(import ./deployment)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ./services/fediversity;
}
{
garageConfigurationResource = {
imports = [
commonResourceModule
./machines/operator/test01
];
};
mastodonConfigurationResource = {
imports = [
commonResourceModule
./machines/operator/test06 # somehow `test02` has a problem - use test06 instead
];
};
peertubeConfigurationResource = {
imports = [
commonResourceModule
./machines/operator/test05
];
};
pixelfedConfigurationResource = {
imports = [
commonResourceModule
./machines/operator/test04
];
};
};
nixops4ResourceNixosMockOptions = {
## NOTE: We allow the use of a few options from
## `nixops4-nixos.modules.nixops4Resource.nixos` such that we can
## reuse modules that make use of them.
##
## REVIEW: We can probably do much better and cleaner. On the other hand,
## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4.
options = {
nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { };
ssh = mkOption { };
};
};
makeResourceConfig =
{ vmName, isTestVm }:
(evalModules {
modules = [
nixops4ResourceNixosMockOptions
commonResourceModule
(if isTestVm then ./machines/operator/${vmName} else ./machines/dev/${vmName})
];
}).config;
## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration =
isTestVm: vmName:
import "${sources.nixpkgs}/nixos" {
configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
system = "x86_64-linux";
};
makeVmOptions =
isTestVm: vmName:
let
config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
in
if config.isFediversityVm then
{
inherit (config)
vmId
description
sockets
cores
memory
diskSize
hostPublicKey
unsafeHostPrivateKey
;
}
else
null;
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ./machines/dev;
testMachines = listSubdirectories ./machines/operator;
nixosConfigurations =
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true);
vmOptions =
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
(genAttrs machines (makeVmOptions false) // genAttrs testMachines (makeVmOptions true));
in
{
nixConfig = {
extra-trusted-substituters = "https://cache.saumon.network/proxmox-nixos";
extra-trusted-public-keys = "proxmox-nixos:D9RYSWpQQC/msZUWphOY2I5RLH5Dd6yQcaHIuug7dWM=";
};
}
// import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
"${sources.git-hooks}/flake-module.nix"
inputs.nixops4.modules.flake.default
./deployment/flake-part.nix
./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
];
imports = [
"${sources.git-hooks}/flake-module.nix"
inputs.nixops4.modules.flake.default
];
perSystem =
{
pkgs,
lib,
system,
...
}:
{
checks = {
panel = (import ./. { inherit sources system; }).tests.panel.basic;
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment.
## - We add a “default” deployment with all normal machines.
## - We add a “test” deployment with all test machines.
nixops4Deployments = genAttrs machines makeDeployment' // {
default = makeDeployment machines;
test = makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env != "" then
env
else
builtins.trace "env var DEPLOYMENT not set, falling back to ./deployment/configuration.sample.json!" (readFile ./deployment/configuration.sample.json)
)
);
};
flake = { inherit nixosConfigurations vmOptions; };
checks = forPkgs (
{ system, pkgs }:
{
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =
let
## Add a directory here if pre-commit hooks shouldn't apply to it.
optout = [ "npins" ];
excludes = map (dir: "^${dir}/") optout;
addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
in
addExcludes {
nixfmt-rfc-style.enable = true;
deadnix.enable = true;
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
panel = (import ./. { inherit sources system; }).tests.panel.basic;
test-mastodon-service = pkgs.testers.runNixOSTest ./services/tests/mastodon.nix;
test-peertube-service = pkgs.testers.runNixOSTest ./services/tests/peertube.nix;
proxmox-basic = import ./deployment/check/proxmox {
inherit (pkgs.testers) runNixOSTest;
inherit sources system;
};
}
);
deployment-basic = import ./deployment/check/basic {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-cli = import ./deployment/check/cli {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-panel = import ./deployment/check/panel {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-ssh = import ./deployment/check/data-model-ssh {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-nixops4 = import ./deployment/check/data-model-nixops4 {
inherit (pkgs.testers) runNixOSTest;
inherit pkgs inputs sources;
};
deployment-model-tf = import ./deployment/check/data-model-tf {
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./deployment/check/data-model-tf-proxmox {
inherit inputs sources system;
};
}
// mapAttrs' (name: nixosConfiguration: {
name = "nixosConfigurations-${name}";
value = nixosConfiguration.config.system.build.toplevel;
}) nixosConfigurations
// mapAttrs' (name: vmOptions: {
name = "vmOptions-${name}";
## Check that VM options builds/evaluates correctly. `deepSeq e1
## e2` evaluates `e1` strictly in depth before returning `e2`. We
## use this trick because checks need to be derivations, which VM
## options are not.
value = deepSeq vmOptions pkgs.hello;
}) vmOptions
);
formatter = forPkgs ({ pkgs, ... }: pkgs.nixfmt-rfc-style);
};
}

@@ -1,214 +0,0 @@
{
inputs,
lib,
sources,
keys,
secrets,
...
}:
let
inherit (builtins) readDir readFile fromJSON;
inherit (lib)
attrNames
mkOption
evalModules
filterAttrs
mapAttrs'
deepSeq
;
inherit (lib.attrsets) genAttrs;
commonResourceModule = {
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
# flake-parts and have our own data model for how the project is organised
# internally
_module.args = {
inherit
inputs
keys
secrets
sources
;
};
## FIXME: It would be preferable to have those `sources`-related imports in
## the modules that use them. However, doing so triggers infinite recursions
## because of the way we propagate `sources`. `sources` must be propagated by
## means of `specialArgs`, but this requires a bigger change.
nixos.module.imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
"${sources.agenix}/modules/age.nix"
"${sources.disko}/module.nix"
"${sources.home-manager}/nixos"
];
imports = [
./common/resource.nix
];
};
## Given a list of machine names, make a deployment with those machines'
## configurations as resources.
makeDeployment =
vmNames:
{ providers, ... }:
{
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
commonResourceModule
../machines/dev/${vmName}
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];
## Given an attrset of test configurations (key = test machine name, value =
## NixOS configuration module), make a deployment with those machines'
## configurations as resources.
makeTestDeployment =
(import ../deployment)
{
inherit lib;
inherit (inputs) nixops4 nixops4-nixos;
fediversity = import ../services/fediversity;
}
{
garageConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test01
];
};
mastodonConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
];
};
peertubeConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test05
];
};
pixelfedConfigurationResource = {
imports = [
commonResourceModule
../machines/operator/test04
];
};
};
nixops4ResourceNixosMockOptions = {
## NOTE: We allow the use of a few options from
## `nixops4-nixos.modules.nixops4Resource.nixos` such that we can
## reuse modules that make use of them.
##
## REVIEW: We can probably do much better and cleaner. On the other hand,
## this is only needed to expose NixOS configurations for provisioning
## purposes, and eventually all of this should be handled by NixOps4.
options = {
nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
nixpkgs = mkOption { };
ssh = mkOption { };
};
};
makeResourceConfig =
{ vmName, isTestVm }:
(evalModules {
modules = [
nixops4ResourceNixosMockOptions
commonResourceModule
(if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
];
}).config;
## Given a VM name, make a NixOS configuration for this machine.
makeConfiguration =
isTestVm: vmName:
import "${sources.nixpkgs}/nixos" {
configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
system = "x86_64-linux";
};
makeVmOptions =
isTestVm: vmName:
let
config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
in
if config.isFediversityVm then
{
inherit (config)
vmId
description
sockets
cores
memory
diskSize
hostPublicKey
unsafeHostPrivateKey
;
}
else
null;
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
machines = listSubdirectories ../machines/dev;
testMachines = listSubdirectories ../machines/operator;
nixosConfigurations =
genAttrs machines (makeConfiguration false)
// genAttrs testMachines (makeConfiguration true);
vmOptions =
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
(genAttrs machines (makeVmOptions false) // genAttrs testMachines (makeVmOptions true));
in
{
_class = "flake";
## - Each normal or test machine gets a NixOS configuration.
## - Each normal or test machine gets a VM options entry.
## - Each normal machine gets a deployment.
## - We add a “default” deployment with all normal machines.
## - We add a “test” deployment with all test machines.
nixops4Deployments = genAttrs machines makeDeployment' // {
default = makeDeployment machines;
test = makeTestDeployment (
fromJSON (
let
env = builtins.getEnv "DEPLOYMENT";
in
if env != "" then
env
else
builtins.trace "env var DEPLOYMENT not set, falling back to ../deployment/configuration.sample.json!" (readFile ../deployment/configuration.sample.json)
)
);
};
flake = { inherit nixosConfigurations vmOptions; };
perSystem =
{ pkgs, ... }:
{
checks =
mapAttrs' (name: nixosConfiguration: {
name = "nixosConfigurations-${name}";
value = nixosConfiguration.config.system.build.toplevel;
}) nixosConfigurations
// mapAttrs' (name: vmOptions: {
name = "vmOptions-${name}";
## Check that VM options builds/evaluates correctly. `deepSeq e1
## e2` evaluates `e1` strictly in depth before returning `e2`. We
## use this trick because checks need to be derivations, which VM
## options are not.
value = deepSeq vmOptions pkgs.hello;
}) vmOptions;
};
}

@@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.keys = import ./.;
}

@@ -1,5 +0,0 @@
{
_class = "flake";
_module.args.secrets = import ./.;
}