wip tofu test

This commit is contained in:
Kiara Grouwstra 2025-05-08 15:55:25 +02:00
parent 1019ac15b0
commit 43f1246ed7
Signed by: kiara
SSH key fingerprint: SHA256:COspvLoLJ5WC5rFb9ZDe5urVCkK4LJZOsjfF4duRJFU
17 changed files with 399 additions and 425 deletions

View file

@@ -31,7 +31,7 @@ jobs:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-basic -L
- run: cd deployment && nix-build -A tests
check-launch:
runs-on: native

View file

@@ -0,0 +1,45 @@
garage = {
target = "target"
nix_module = <<-EOF
{
fediversityVm = {
# vmId = 7014;
proxmox = "fediversity";
# hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
# unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
domain = "test.example";
ipv4 = {
address = "";
gateway = "";
};
ipv6 = {
address = "";
gateway = "";
};
};
}
EOF
}
mastodon = {
enable = true
target = "target"
nix_module = <<-EOF
{
fediversityVm = {
# vmId = 7014;
proxmox = "fediversity";
# hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
# unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
domain = "test.example";
ipv4 = {
address = "";
gateway = "";
};
ipv6 = {
address = "";
gateway = "";
};
};
}
EOF
}

View file

@@ -1,32 +1,13 @@
{
inputs,
pkgs,
lib,
providers,
...
}:
{
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources.target = {
type = providers.local.exec;
imports = [ inputs.nixops4-nixos.modules.nixops4Resource.nixos ];
ssh = {
host = "target";
hostPublicKey = builtins.readFile ./target_host_key.pub;
};
nixpkgs = inputs.nixpkgs;
nixos.module =
{ pkgs, ... }:
{
imports = [
./minimalTarget.nix
(lib.modules.importJSON ./target-network.json)
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.systemPackages = [ pkgs.cowsay ];
};
};
imports = [
./minimalTarget.nix
(lib.modules.importJSON ./target-network.json)
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.systemPackages = [ pkgs.cowsay ];
}

View file

@@ -1,21 +0,0 @@
{ inputs, ... }:
{
nixops4Deployments.check-deployment-basic =
{ ... }:
{
imports = [
./deployment.nix
];
_module.args.inputs = inputs;
};
perSystem =
{ inputs', pkgs, ... }:
{
checks.deployment-basic = pkgs.callPackage ./nixosTest.nix {
nixops4-flake-in-a-bottle = inputs'.nixops4.packages.flake-in-a-bottle;
inherit inputs;
};
};
}

View file

@@ -1,15 +1,30 @@
{
lib,
modulesPath,
pkgs,
...
}:
let
tf = pkgs.callPackage ../../../launch/tf.nix { };
tfEnv = pkgs.callPackage ../../../launch/tf-env.nix { };
in
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
];
environment.systemPackages = [
tf
tfEnv
pkgs.bash
pkgs.acl
pkgs.attr
pkgs.autoconf
pkgs.automake
pkgs.python3
];
## Test framework disables switching by default. That might be OK by itself,
## but we also use this config for getting the dependencies in
## `deployer.system.extraDependencies`.

View file

@@ -1,161 +1,123 @@
{
testers,
inputs,
runCommandNoCC,
nixops4-flake-in-a-bottle,
...
}:
testers.runNixOSTest (
{
lib,
config,
hostPkgs,
...
}:
let
vmSystem = config.node.pkgs.hostPlatform.system;
pathToRoot = ../../..;
pathFromRoot = "deployment/check/basic";
deploymentName = "check-deployment-basic";
## TODO: sanity check the existence of (pathToRoot + "/flake.nix")
## TODO: sanity check that (pathToRoot + "/${pathFromRoot}" == ./.)
## The whole repository, with the flake at its root.
src = lib.fileset.toSource {
fileset = pathToRoot;
root = pathToRoot;
sources = import ../../../npins;
pkgs = hostPkgs;
tf = pkgs.callPackage ../../../launch/tf.nix { };
tfEnv = pkgs.callPackage ../../../launch/tf-env.nix { };
inherit (pkgs) lib;
inherit (import ../../../panel/env.nix { inherit lib pkgs; }) BIN_PATH;
tfVars = builtins.path {
name = "basic.tfvars";
path = ./basic.tfvars;
};
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
targetNetworkJSON = hostPkgs.writeText "target-network.json" (
builtins.toJSON config.nodes.target.system.build.networkConfig
);
extraDependenciesFromMachine =
machine:
[
machine.system.build.toplevel.inputDerivation
machine.system.build.etc.inputDerivation
machine.system.build.etcBasedir.inputDerivation
machine.system.build.etcMetadataImage.inputDerivation
machine.system.build.extraUtils.inputDerivation
machine.system.path.inputDerivation
machine.system.build.setEnvironment.inputDerivation
machine.system.build.vm.inputDerivation
# machine.system.build.vmWithBootLoader.inputDerivation
machine.system.build.bootStage1.inputDerivation
machine.system.build.bootStage2.inputDerivation
]
++ lib.concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) machine.environment.etc
);
inherit (config) targetMachines pathToRoot pathFromRoot;
in
{
name = "deployment-basic";
imports = [
inputs.nixops4-nixos.modules.nixosTest.static
(import ../common/nixosTest.nix { inherit config lib hostPkgs; })
];
nodes = {
deployer =
{ pkgs, nodes, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${vmSystem}.default
];
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = "deployment/check/basic";
virtualisation = {
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4096;
diskSize = 10 * 1024;
cores = 2;
nodes =
{
deployer =
{ pkgs, nodes, ... }:
{
virtualisation = {
memorySize = 32 * 1024; # FIXME: trim down - maybe make it an option
diskSize = 50 * 1024; # FIXME: trim down - maybe make it an option
cores = 8; # FIXME: trim down - maybe make it an option
};
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};
system.extraDependencies =
lib.attrValues sources
++ [
pkgs.stdenv
pkgs.stdenvNoCC
# pkgs
tfVars
pkgs.bash
pkgs.acl
pkgs.attr
pkgs.autoconf
pkgs.automake
pkgs.python3
]
++ lib.concatLists (
map (tm: extraDependenciesFromMachine nodes.${tm}) (targetMachines ++ [ "fake" ])
);
environment.systemPackages = [
tf
tfEnv
];
};
fake.imports = [ ../basic/minimalTarget.nix ];
}
// lib.genAttrs targetMachines (_: {
imports = [ ../basic/minimalTarget.nix ];
users.users.root.openssh.authorizedKeys.keyFiles = [
(pathToRoot + "/${pathFromRoot}/deployer.pub")
];
});
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};
system.extraDependencies =
[
"${inputs.flake-parts}"
"${inputs.flake-parts.inputs.nixpkgs-lib}"
"${inputs.nixops4}"
"${inputs.nixops4-nixos}"
"${inputs.nixpkgs}"
pkgs.stdenv
pkgs.stdenvNoCC
pkgs.cowsay
pkgs.cowsay.inputDerivation # NOTE: Crucial!!!
## Some derivations will be different compared to target's initial
## state, so we'll need to be able to build something similar.
## Generally the derivation inputs aren't that different, so we
## use the initial state of the target as a base.
nodes.target.system.build.toplevel.inputDerivation
nodes.target.system.build.etc.inputDerivation
nodes.target.system.path.inputDerivation
nodes.target.system.build.bootStage1.inputDerivation
nodes.target.system.build.bootStage2.inputDerivation
]
++ lib.concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) nodes.target.environment.etc
);
};
target.imports = [ ./minimalTarget.nix ];
};
testScript = ''
start_all()
target.wait_for_unit("multi-user.target")
deployer.wait_for_unit("multi-user.target")
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${src} work")
with subtest("Configure the network"):
deployer.copy_from_host("${targetNetworkJSON}", "/root/target-network.json")
deployer.succeed("mv /root/target-network.json work/${pathFromRoot}/target-network.json")
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
deployer.succeed(f"echo '{deployer_key}' > work/${pathFromRoot}/deployer.pub")
target.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
with subtest("Configure the target host key"):
target_host_key = target.succeed("ssh-keyscan target | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{target_host_key}' > work/${pathFromRoot}/target_host_key.pub")
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing to grab things directly from the host's store.
with subtest("Override the lock"):
deployer.succeed("""
cd work
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input flake-parts ${inputs.flake-parts} \
--override-input nixops4 ${nixops4-flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${nixops4-flake-in-a-bottle} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
\
--override-input nixpkgs ${inputs.nixpkgs} \
--override-input git-hooks ${inputs.git-hooks} \
;
""")
extraTestScript = ''
with subtest("Check the status before deployment"):
target.fail("cowsay hi 1>&2")
hello.fail("hello 1>&2")
cowsay.fail("cowsay 1>&2")
with subtest("Validate config"):
deployer.wait_for_unit("multi-user.target")
deployer.succeed("${lib.getExe tf} -chdir='${tfEnv}/launch' validate")
with subtest("Run the deployment"):
deployer.succeed("cd work && nixops4 apply ${deploymentName} --show-trace --no-interactive")
deployer.succeed("PATH=${BIN_PATH} ${lib.getExe tf} -chdir='${tfEnv}/launch' apply --auto-approve -lock=false -parallelism=1 -var-file='${tfVars}'")
with subtest("Check the deployment"):
target.succeed("cowsay hi 1>&2")
hello.succeed("hello 1>&2")
cowsay.succeed("cowsay hi 1>&2")
'';
}
)

View file

@@ -0,0 +1,163 @@
{
lib,
config,
hostPkgs,
...
}:
let
sources = import ../../../npins;
inherit (builtins)
filter
;
inherit (lib)
fileset
mkOption
genAttrs
concatLists
attrNames
;
## Helpers to map over target machines and produce an attrset, a list, or a
## multiline string suitable for use in a Python script.
# forAllTargetMachines = lib.genAttrs config.targetMachines;
# forAllTargetMachines' = f: map f config.targetMachines;
forConcat = xs: f: builtins.concatStringsSep "\n" (map f xs);
inherit (config) targetMachines pathToRoot pathFromRoot;
## The whole repository.
## FIXME: We could probably have fileset be the union of ./. with flake.nix
## and flake.lock - I doubt we need anything else.
src = fileset.toSource {
fileset = config.pathToRoot;
root = config.pathToRoot;
};
## Some derivations will be different compared to target's initial state, so
## we'll need to be able to build something similar. Generally the derivation
## inputs aren't that different, so we use the initial state of the target as
## a base.
extraDependenciesFromMachine =
machine:
[
machine.system.build.toplevel.inputDerivation
machine.system.build.etc.inputDerivation
machine.system.build.etcBasedir.inputDerivation
machine.system.build.etcMetadataImage.inputDerivation
machine.system.build.extraUtils.inputDerivation
machine.system.path.inputDerivation
machine.system.build.setEnvironment.inputDerivation
machine.system.build.vm.inputDerivation
# machine.system.build.vmWithBootLoader.inputDerivation
machine.system.build.bootStage1.inputDerivation
machine.system.build.bootStage2.inputDerivation
]
++ concatLists (
lib.mapAttrsToList (
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
) machine.environment.etc
);
allNodesButFake = filter (m: m != "fake") (attrNames config.nodes);
in
{
options = {
targetMachines = mkOption { };
pathToRoot = mkOption { };
## TODO: sanity check that (pathToRoot + "/${pathFromRoot}" == ./.)
pathFromRoot = mkOption { };
## FIXME: I wish I could just use `testScript` but with something like
## `mkOrder` to put this module's string before something else.
extraTestScript = mkOption { };
};
config = {
nodes =
{
deployer =
{ pkgs, nodes, ... }:
{
virtualisation = {
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 32 * 1024; # FIXME: trim down - maybe make it an option
diskSize = 50 * 1024; # FIXME: trim down - maybe make it an option
cores = 8; # FIXME: trim down - maybe make it an option
};
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = 1;
};
system.extraDependencies =
lib.attrValues sources
++ [
# pkgs
pkgs.stdenv
pkgs.stdenvNoCC
]
++ concatLists (map (tm: extraDependenciesFromMachine nodes.${tm}) (targetMachines ++ [ "fake" ]));
};
## A “fake” node that will not be started or used in the test, but
## whose configuration will be dumped in the deployer's extra
## dependencies. This is an indirect (and more pleasant) way of giving
## the deployer the right derivations to build the desired services.
fake.imports = [ ../basic/minimalTarget.nix ];
}
// genAttrs targetMachines (_: {
imports = [ ../basic/minimalTarget.nix ];
users.users.root.openssh.authorizedKeys.keyFiles = [
(pathToRoot + "/${pathFromRoot}/deployer.pub")
];
});
testScript = ''
${forConcat allNodesButFake (n: ''
${n}.start()
'')}
${forConcat allNodesButFake (n: ''
${n}.wait_for_unit("multi-user.target")
'')}
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${src} work")
with subtest("Configure the network"):
${forConcat targetMachines (
tm:
let
targetNetworkJSON = hostPkgs.writeText "target-network.json" (
builtins.toJSON config.nodes.${tm}.system.build.networkConfig
);
in
''
deployer.copy_from_host("${targetNetworkJSON}", "/root/target-network.json")
deployer.succeed("mv /root/target-network.json work/${pathFromRoot}/${tm}-network.json")
''
)}
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
deployer.succeed(f"echo '{deployer_key}' > work/${pathFromRoot}/deployer.pub")
${forConcat targetMachines (tm: ''
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
'')}
with subtest("Configure the target host key"):
${forConcat targetMachines (tm: ''
host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{host_key}' > work/${pathFromRoot}/${tm}_host_key.pub")
'')}
${config.extraTestScript}
'';
};
}

View file

@@ -0,0 +1,7 @@
{
pkgs,
...
}:
{
basic = pkgs.callPackage ./basic/nixosTest.nix { };
}

View file

@@ -1,194 +1,16 @@
## `makeMakeDeployment` -- Function to help hosting providers make a
## `makeDeployment` function.
##
## https://factoryfactoryfactory.net/
## Generic utilities used in this function, eg. nixpkgs, NixOps4 providers, etc.
## REVIEW: We should maybe be more specific than just `inputs`.
{
lib,
nixops4,
nixops4-nixos,
fediversity,
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs { inherit system; },
}:
## Information on the hosting provider's infrastructure. This is where we inform
## this function of where it can find eg. Proxmox.
{
## Four NixOS configuration resource modules for four services. Those are VMs
## that are already deployed and on which we will push our configurations.
##
## - Ultimately, we just want a pool of VMs, or even just a Proxmox.
## - Each machine is flagged for a certain use case until we control DNS.
garageConfigurationResource,
mastodonConfigurationResource,
peertubeConfigurationResource,
pixelfedConfigurationResource,
}:
tests = import ./check { inherit pkgs; };
## From the hosting provider's perspective, the function is meant to be
## partially applied only until here.
## Information on the specific deployment that we request. This is the
## information coming from the FediPanel.
##
## FIXME: lock step the interface with the definitions in the FediPanel
panelConfig:
let
inherit (lib) mkIf;
in
## Regular arguments of a NixOps4 deployment module.
{ providers, ... }:
{
options = {
deployment = lib.mkOption {
description = ''
Configuration to be deployed
'';
# XXX(@fricklerhandwerk):
# misusing this will produce obscure errors that will be truncated by NixOps4
type = lib.types.submodule ./options.nix;
};
};
config = {
providers = { inherit (nixops4.modules.nixops4Provider) local; };
resources =
let
## NOTE: All of these secrets are publicly available in this source file
## and will end up in the Nix store. We don't care as they are only ever
## used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
mastodonS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
};
peertubeS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
};
pixelfedS3KeyConfig =
{ pkgs, ... }:
{
s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
};
makeConfigurationResource = resourceModule: config: {
type = providers.local.exec;
imports = [
nixops4-nixos.modules.nixops4Resource.nixos
resourceModule
{
## NOTE: With NixOps4, there are several levels and all of them live
## in the NixOS module system:
##
## 1. Each NixOps4 deployment is a module.
## 2. Each NixOps4 resource is a module. This very comment is
## inside an attrset imported as a module in a resource.
## 3. Each NixOps4 'configuration' resource contains an attribute
## 'nixos.module', itself a NixOS configuration module.
nixos.module =
{ ... }:
{
imports = [
config
fediversity
];
};
}
];
};
in
{
garage-configuration = makeConfigurationResource garageConfigurationResource (
{ pkgs, ... }:
mkIf (panelConfig.mastodon.enable || panelConfig.peertube.enable || panelConfig.pixelfed.enable) {
fediversity = {
inherit (panelConfig) domain;
garage.enable = true;
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
mastodon = mastodonS3KeyConfig { inherit pkgs; };
peertube = peertubeS3KeyConfig { inherit pkgs; };
};
}
);
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.mastodon.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
enable = true;
};
temp.cores = 1; # FIXME: should come from NixOps4 eventually
};
}
);
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.peertube.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
enable = true;
## NOTE: Only ever used for testing anyway.
##
## FIXME: Generate and store in NixOps4's state.
secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
};
};
}
);
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
{ pkgs, ... }:
mkIf panelConfig.pixelfed.enable {
fediversity = {
inherit (panelConfig) domain;
temp.initialUser = {
inherit (panelConfig.initialUser) username email displayName;
# FIXME: disgusting, but nvm, this is going to be replaced by
# proper central authentication at some point
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
};
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
enable = true;
};
};
}
);
};
};
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
sources
system
pkgs
;
}

View file

@@ -37,11 +37,11 @@ in
};
};
defaultGateway = {
defaultGateway = lib.mkIf (config.fediversityVm ? ipv4) {
address = config.fediversityVm.ipv4.gateway;
interface = "eth0";
};
defaultGateway6 = {
defaultGateway6 = lib.mkIf (config.fediversityVm ? ipv6) {
address = config.fediversityVm.ipv6.gateway;
interface = "eth0";
};

View file

@@ -9,7 +9,12 @@ let
inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix;
sources = import ../../npins;
inherit (sources) nixpkgs agenix disko home-manager;
inherit (sources)
nixpkgs
agenix
disko
home-manager
;
secretsPrefix = ../../secrets;
secrets = import (secretsPrefix + "/secrets.nix");
@@ -49,9 +54,9 @@ in
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
})
}
) secrets;
## FIXME: Remove direct root authentication once the NixOps4 NixOS provider

View file

@@ -7,30 +7,17 @@ locals {
# user-facing applications
application_configs = {
# FIXME: wrap applications at the interface to grab them in one go?
mastodon = {
cfg = var.mastodon
hostname = "test06"
}
pixelfed = {
cfg = var.pixelfed
hostname = "test04"
}
peertube = {
cfg = var.peertube
hostname = "test05"
}
mastodon = var.mastodon
pixelfed = var.pixelfed
peertube = var.peertube
}
# services shared between applications
peripherals = { for name, inst in {
garage = "test01"
} : name => {
hostname = inst
cfg = {
# enable if any user applications are enabled
enable = anytrue([for _, app in local.application_configs: app.cfg.enable])
}
}
}
garage = var.garage
} : name => merge(inst, {
# enable if any user applications are enabled
enable = anytrue([for _, app in local.application_configs: app.enable])
}) }
}
# hash of our code directory, used in dev to trigger re-deploy
@@ -46,7 +33,7 @@ resource "terraform_data" "nixos" {
for_each = {for name, inst in merge(
local.peripherals,
local.application_configs,
) : name => inst if inst.cfg.enable}
) : name => inst if inst.enable}
# trigger rebuild/deploy if (FIXME?) any potentially used config/code changed,
# preventing these (20+s, build being bottleneck) when nothing changed.
@@ -84,6 +71,7 @@ resource "terraform_data" "nixos" {
# INSTANTIATE
command=(
nix-instantiate
--show-trace
--expr
'let
os = import <nixpkgs/nixos> {
@@ -98,14 +86,14 @@ resource "terraform_data" "nixos" {
# for service `mastodon` import `mastodon.nix`
${path.root}/${each.key}.nix
# FIXME: get VM details from TF
${path.root}/../infra/test-machines/${each.value.hostname}
${each.value.nix_module}
];
# nix path for debugging
nix.nixPath = [ "${local.nix_path}" ];
## FIXME: switch root authentication to users with password-less sudo, see #24
users.users.root.openssh.authorizedKeys.keys = let
keys = import ../keys;
in attrValues keys.contributors ++ [
in builtins.attrValues keys.contributors ++ [
# allow our panel vm access to the test machines
keys.panel
];
@@ -114,7 +102,7 @@ resource "terraform_data" "nixos" {
builtins.fromJSON "${replace(jsonencode({
terraform = {
domain = var.domain
hostname = each.value.hostname
hostname = each.key
initialUser = var.initialUser
}
}), "\"", "\\\"")}";
@@ -138,7 +126,7 @@ resource "terraform_data" "nixos" {
# set our variables using the json object
eval "export $(echo $json | jaq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
# FIXME: de-hardcode domain
host="root@${each.value.hostname}.abundos.eu" # FIXME: #24
host="root@${each.value.target}" # FIXME: #24
buildArgs=(
--option extra-binary-caches https://cache.nixos.org/
--option substituters $substituters

View file

@@ -1,3 +1,4 @@
# interface between the nix module and TF
# TODO: could (part of) this be generated somehow? c.f #275
{
lib,

View file

@@ -5,7 +5,7 @@
}:
let
inherit (lib) attrValues elem mkDefault;
inherit (lib) attrValues elem;
inherit (lib.attrsets) concatMapAttrs optionalAttrs;
inherit (lib.strings) removeSuffix;
@@ -15,7 +15,7 @@ let
in
{
fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
# fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};
## The configuration of the machine. We strive to keep in this file only the
## options that really need to be injected from the resource. Everything else
@@ -30,7 +30,7 @@ in
## as `age.secrets.<name>.file`.
age.secrets = concatMapAttrs (
name: secret:
optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
optionalAttrs (elem config.fediversityVm.name secret.publicKeys) {
${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
}
) secrets;

View file

@@ -1,3 +1,4 @@
# interface between the TF module and the caller
# TODO: (partially) generate, say from nix modules, c.f. #275
variable "domain" {
@@ -5,31 +6,39 @@ variable "domain" {
default = "fediversity.net"
}
variable "garage" {
type = object({
target = optional(string, "test01.abundos.eu")
nix_module = optional(string, "../infra/test-machines/test01")
})
default = {}
}
variable "mastodon" {
type = object({
enable = bool
enable = optional(bool, false)
target = optional(string, "test06.abundos.eu")
nix_module = optional(string, "../infra/test-machines/test06")
})
default = {
enable = false
}
default = {}
}
variable "pixelfed" {
type = object({
enable = bool
enable = optional(bool, false)
target = optional(string, "test04.abundos.eu")
nix_module = optional(string, "../infra/test-machines/test04")
})
default = {
enable = false
}
default = {}
}
variable "peertube" {
type = object({
enable = bool
enable = optional(bool, false)
target = optional(string, "test05.abundos.eu")
nix_module = optional(string, "../infra/test-machines/test05")
})
default = {
enable = false
}
default = {}
}
variable "initialUser" {

View file

@@ -26,20 +26,17 @@
"hash": "1w2gsy6qwxa5abkv8clb435237iifndcxq0s79wihqw11a5yb938"
},
"disko": {
"type": "GitRelease",
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "nix-community",
"repo": "disko"
},
"pre_releases": false,
"version_upper_bound": null,
"release_prefix": null,
"branch": "master",
"submodules": false,
"version": "v1.11.0",
"revision": "cdf8deded8813edfa6e65544f69fdd3a59fa2bb4",
"url": "https://api.github.com/repos/nix-community/disko/tarball/v1.11.0",
"hash": "13brimg7z7k9y36n4jc1pssqyw94nd8qvgfjv53z66lv4xkhin92"
"revision": "b5d1320ebc2f34dbea4655f95167f55e2130cdb3",
"url": "https://github.com/nix-community/disko/archive/b5d1320ebc2f34dbea4655f95167f55e2130cdb3.tar.gz",
"hash": "1dcakwcvbqapvd6c321kdrhki30dn1pbnffvzhdb0ab4gman9fcq"
},
"flake-inputs": {
"type": "Git",

View file

@@ -5,7 +5,7 @@
{% for service_name, service_meta in services.items %}
{% if service_meta.enable %}
<li>
<a target="_blank" href={{ service_meta.url }}>{{ service_name }}</a>
<a target="_blank" href="https://{{ service_name }}.{{ services.domain }}">{{ service_name }}</a>
</li>
{% endif %}
{% endfor %}