forked from Fediversity/Fediversity
Compare commits
35 commits
ef25264045
...
50e1a768e7
Commits (SHA1, newest first):
50e1a768e7, 5a02027d48, 2af6817cd8, 20557422e9, 4b85628ab1, 03cbb4738d, 9769e1714c,
10ba2ee1e6, b7cf39534f, ee6b990144, fe0edd897b, 081ae1ad07, 3ae1235461, 85cbdd945b,
d51f8fcf16, ae06cfc417, b7e34de835, 458e565e4e, 0d36f32190, 5c47da3b0b, 8e50fd675f,
b29b8bfb84, d67f5a2b7d, 4262a92741, e5b08faa1b, c0b8dbbeeb, 1048ac674e, d1d152d2df,
76a07a17ad, f76d953b1f, 1f99a4c6c3, 588bb77a94, df3a070fa4, be72b82875, 1b66028f32
37 changed files with 991 additions and 205 deletions
|
@@ -56,3 +56,35 @@ jobs:
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.deployment-panel -L

  check-deployment-model:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.deployment-model -L

  ## NOTE: NixOps4 does not provide a good “dry run” mode, so we instead check
  ## proxies for resources, namely whether their `.#vmOptions.<machine>` and
  ## `.#nixosConfigurations.<machine>` outputs evaluate and build correctly, and
  ## whether we can dry run `infra/proxmox-*.sh` on them. This will not catch
  ## everything, and in particular not issues in how NixOps4 wires up the
  ## resources, but that is still something.
  check-resources:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: |
          set -euC

          echo ==================== [ VM Options ] ====================
          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).vmOptions)')
          for machine in $machines; do
            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
            nix build .#checks.x86_64-linux.vmOptions-$machine
          done

          echo
          echo ==================== [ NixOS Configurations ] ====================
          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).nixosConfigurations)')
          for machine in $machines; do
            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
            nix build .#checks.x86_64-linux.nixosConfigurations-$machine
          done
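For reference, the per-machine `vmOptions-<machine>` and `nixosConfigurations-<machine>` checks that this job builds are assembled further down in this change set; a minimal Nix sketch of that wiring, with illustrative binding names, where `deepSeq opts pkgs.hello` is used only because flake checks must be derivations:

# Sketch only; mirrors the perSystem.checks wiring shown later in this diff.
perSystem =
  { pkgs, ... }:
  {
    checks =
      lib.mapAttrs' (name: cfg: {
        name = "nixosConfigurations-${name}";
        value = cfg.config.system.build.toplevel;
      }) nixosConfigurations
      // lib.mapAttrs' (name: opts: {
        name = "vmOptions-${name}";
        # deepSeq forces full evaluation of the options, then returns a cheap derivation
        value = lib.deepSeq opts pkgs.hello;
      }) vmOptions;
  };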
@@ -11,7 +11,8 @@ let
    ;
  inherit (pkgs) lib;
  inherit (import sources.flake-inputs) import-flake;
  inherit ((import-flake { src = ./.; }).inputs) nixops4;
  inputs = (import-flake { src = ./.; }).inputs;
  inherit (inputs) nixops4;
  panel = import ./panel { inherit sources system; };
  pre-commit-check =
    (import "${git-hooks}/nix" {

@@ -78,6 +79,7 @@ in
  # re-export inputs so they can be overridden granularly
  # (they can't be accessed from the outside any other way)
  inherit
    inputs
    sources
    system
    pkgs

@ -49,21 +49,23 @@ in
|
|||
substituters = mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
download-attempts = 1;
|
||||
extra-experimental-features = "flakes";
|
||||
};
|
||||
|
||||
system.extraDependencies =
|
||||
[
|
||||
inputs.nixops4
|
||||
inputs.nixops4-nixos
|
||||
inputs.nixpkgs
|
||||
sources.nixpkgs
|
||||
|
||||
sources.flake-parts
|
||||
sources.flake-inputs
|
||||
sources.git-hooks
|
||||
|
||||
pkgs.stdenv
|
||||
pkgs.stdenvNoCC
|
||||
pkgs.automake
|
||||
pkgs.autoconf
|
||||
pkgs.binutils
|
||||
pkgs.bison
|
||||
]
|
||||
++ (
|
||||
let
|
||||
|
@ -95,7 +97,35 @@ in
|
|||
machine.system.build.vm.inputDerivation
|
||||
machine.system.build.bootStage1.inputDerivation
|
||||
machine.system.build.bootStage2.inputDerivation
|
||||
pkgs.automake.inputDerivation
|
||||
pkgs.autoconf.inputDerivation
|
||||
pkgs.bash.inputDerivation
|
||||
pkgs.binutils.inputDerivation
|
||||
pkgs.bison.inputDerivation
|
||||
]
|
||||
++ concatLists (
|
||||
lib.lists.map (
|
||||
pkg:
|
||||
if
|
||||
pkg ? inputDerivation
|
||||
# error: output '/nix/store/dki9d3vldafg9ydrfm7x0g0rr0qljk98-sudo-1.9.16p2' is not allowed to refer to the following paths:
|
||||
# /nix/store/2xdmps65ryklmbf025bm4pxv16gb8ajv-sudo-1.9.16p2.tar.gz
|
||||
# /nix/store/58br4vk3q5akf4g8lx0pqzfhn47k3j8d-bash-5.2p37
|
||||
# /nix/store/8v6k283dpbc0qkdq81nb6mrxrgcb10i1-gcc-wrapper-14-20241116
|
||||
# /nix/store/9r1nl9ksiyszy4qzzg6y2gcdkca0xmhy-stdenv-linux
|
||||
# /nix/store/a4rmp6in7igbl1wbz9pli5nq0wiclq0y-groff-1.23.0
|
||||
# /nix/store/dki9d3vldafg9ydrfm7x0g0rr0qljk98-sudo-1.9.16p2
|
||||
# /nix/store/f5y58qz2fzpzgkhp0nizixi10x04ppyy-linux-pam-1.6.1
|
||||
# /nix/store/shkw4qm9qcw5sc5n1k5jznc83ny02r39-default-builder.sh
|
||||
# /nix/store/vj1c3wf9c11a0qs6p3ymfvrnsdgsdcbq-source-stdenv.sh
|
||||
# /nix/store/yh6qg1nsi5h2xblcr67030pz58fsaxx3-coreutils-9.6
|
||||
&& !(lib.strings.hasInfix "sudo" (builtins.toString pkg))
|
||||
then
|
||||
[ pkg.inputDerivation ]
|
||||
else
|
||||
[ ]
|
||||
) machine.environment.systemPackages
|
||||
)
|
||||
++ concatLists (
|
||||
lib.mapAttrsToList (
|
||||
_k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
|
||||
|
|
|
@ -48,7 +48,8 @@ in
|
|||
extraTestScript = mkOption { };
|
||||
|
||||
sourceFileset = mkOption {
|
||||
## REVIEW: Upstream to nixpkgs?
|
||||
## FIXME: grab `lib.types.fileset` from NixOS, once upstreaming PR
|
||||
## https://github.com/NixOS/nixpkgs/pull/428293 lands.
|
||||
type = types.mkOptionType {
|
||||
name = "fileset";
|
||||
description = "fileset";
|
||||
|
|
|
@ -18,6 +18,7 @@ in
|
|||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
(modulesPath + "/../lib/testing/nixos-test-base.nix")
|
||||
./sharedOptions.nix
|
||||
../../../infra/common/nixos/users.nix
|
||||
];
|
||||
|
||||
config = mkMerge [
|
||||
|
@ -31,6 +32,9 @@ in
|
|||
## Not used; save a large copy operation
|
||||
channel.enable = false;
|
||||
registry = lib.mkForce { };
|
||||
settings = {
|
||||
download-attempts = 1;
|
||||
};
|
||||
};
|
||||
|
||||
services.openssh = {
|
||||
|
@ -38,10 +42,46 @@ in
|
|||
settings.PermitRootLogin = "yes";
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 22 ];
|
||||
networking = {
|
||||
firewall.enable = false;
|
||||
enableIPv6 = false;
|
||||
};
|
||||
|
||||
services.getty.autologinUser = lib.mkForce "root";
|
||||
|
||||
## Test VMs don't have a bootloader by default.
|
||||
boot.loader.grub.enable = false;
|
||||
# boot.loader = {
|
||||
# # GRUB enabled: installation of GRUB on /dev/disk/by-id/virtio-root failed: No such file or directory
|
||||
# grub.enable = false;
|
||||
# # systemd boot enabled: '/boot' is not a mounted partition. Is the path configured correctly?
|
||||
# systemd-boot.enable = true;
|
||||
# efi.canTouchEfiVariables = true;
|
||||
# };
|
||||
# # same issue as no bootloader
|
||||
# boot.loader.generic-extlinux-compatible.enable = false;
|
||||
# builds but won't boot back up
|
||||
boot.loader.grub.forceInstall = true;
|
||||
# # builds but won't boot back up
|
||||
# # to be used with --no-bootloader, which i could only find for flakes
|
||||
# boot.loader.grub.enable = false;
|
||||
|
||||
users.mutableUsers = false;
|
||||
users.users.root = {
|
||||
password = "password";
|
||||
hashedPassword = null;
|
||||
hashedPasswordFile = null;
|
||||
openssh.authorizedKeys.keys =
|
||||
let
|
||||
keys = import ../../../keys;
|
||||
in
|
||||
lib.attrValues keys.contributors
|
||||
++ [
|
||||
# allow our panel vm access to the test machines
|
||||
keys.panel
|
||||
# allow continuous deployment access
|
||||
keys.cd
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
(mkIf config.enableAcme {
|
||||
|
|
deployment/check/data-model/common-nixosTest.nix (new file, 168 lines)
|
@ -0,0 +1,168 @@
|
|||
{
|
||||
inputs,
|
||||
lib,
|
||||
config,
|
||||
hostPkgs,
|
||||
sources,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
inherit (builtins)
|
||||
concatStringsSep
|
||||
toJSON
|
||||
;
|
||||
inherit (lib)
|
||||
types
|
||||
fileset
|
||||
mkOption
|
||||
genAttrs
|
||||
attrNames
|
||||
optionalString
|
||||
;
|
||||
inherit (hostPkgs)
|
||||
writeText
|
||||
;
|
||||
|
||||
forConcat = xs: f: concatStringsSep "\n" (map f xs);
|
||||
|
||||
in
|
||||
{
|
||||
_class = "nixosTest";
|
||||
|
||||
imports = [
|
||||
../common/sharedOptions.nix
|
||||
];
|
||||
|
||||
options = {
|
||||
## FIXME: I wish I could just use `testScript` but with something like
|
||||
## `mkOrder` to put this module's string before something else.
|
||||
extraTestScript = mkOption { };
|
||||
|
||||
sourceFileset = mkOption {
|
||||
## REVIEW: Upstream to nixpkgs?
|
||||
type = types.mkOptionType {
|
||||
name = "fileset";
|
||||
description = "fileset";
|
||||
descriptionClass = "noun";
|
||||
check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
|
||||
merge = (_: defs: fileset.unions (map (x: x.value) defs));
|
||||
};
|
||||
description = ''
|
||||
A fileset that will be copied to the deployer node in the current
|
||||
working directory. This should contain all the files that are
|
||||
necessary to run that particular test, such as the NixOS
|
||||
modules necessary to evaluate a deployment.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
sourceFileset = fileset.unions [
|
||||
../../../mkFlake.nix
|
||||
../../../flake.lock
|
||||
../../../npins
|
||||
../../data-model.nix
|
||||
../../function.nix
|
||||
|
||||
../common/sharedOptions.nix
|
||||
../common/targetNode.nix
|
||||
../common/targetResource.nix
|
||||
];
|
||||
|
||||
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
|
||||
|
||||
nodes =
|
||||
{
|
||||
deployer = {
|
||||
imports = [ ../common/deployerNode.nix ];
|
||||
_module.args = { inherit inputs sources; };
|
||||
enableAcme = config.enableAcme;
|
||||
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
|
||||
};
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
(
|
||||
if config.enableAcme then
|
||||
{
|
||||
acme = {
|
||||
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
|
||||
## break things once we play with DNS?
|
||||
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
|
||||
## We aren't testing ACME - we just want certificates.
|
||||
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
|
||||
};
|
||||
}
|
||||
else
|
||||
{ }
|
||||
)
|
||||
|
||||
//
|
||||
|
||||
genAttrs config.targetMachines (_: {
|
||||
imports = [ ../common/targetNode.nix ];
|
||||
_module.args = { inherit inputs sources; };
|
||||
enableAcme = config.enableAcme;
|
||||
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
|
||||
});
|
||||
|
||||
testScript = ''
|
||||
${forConcat (attrNames config.nodes) (n: ''
|
||||
${n}.start()
|
||||
'')}
|
||||
|
||||
${forConcat (attrNames config.nodes) (n: ''
|
||||
${n}.wait_for_unit("multi-user.target")
|
||||
'')}
|
||||
|
||||
## A subset of the repository that is necessary for this test. It will be
|
||||
## copied inside the test. The smaller this set, the faster our CI, because we
|
||||
## won't need to re-run when things change outside of it.
|
||||
with subtest("Unpacking"):
|
||||
deployer.succeed("cp -r --no-preserve=mode ${
|
||||
fileset.toSource {
|
||||
root = ../../..;
|
||||
fileset = config.sourceFileset;
|
||||
}
|
||||
}/* .")
|
||||
|
||||
with subtest("Configure the network"):
|
||||
${forConcat config.targetMachines (
|
||||
tm:
|
||||
let
|
||||
targetNetworkJSON = writeText "target-network.json" (
|
||||
toJSON config.nodes.${tm}.system.build.networkConfig
|
||||
);
|
||||
in
|
||||
''
|
||||
deployer.copy_from_host("${targetNetworkJSON}", "${config.pathFromRoot}/${tm}-network.json")
|
||||
''
|
||||
)}
|
||||
|
||||
with subtest("Configure the deployer key"):
|
||||
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
|
||||
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
|
||||
${forConcat config.targetMachines (tm: ''
|
||||
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
|
||||
'')}
|
||||
|
||||
with subtest("Configure the target host key"):
|
||||
${forConcat config.targetMachines (tm: ''
|
||||
host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
|
||||
deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
|
||||
'')}
|
||||
|
||||
# with subtest("Override the flake and its lock"):
|
||||
# deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
|
||||
|
||||
${optionalString config.enableAcme ''
|
||||
with subtest("Set up handmade DNS"):
|
||||
deployer.succeed("echo '${config.nodes.acme.networking.primaryIPAddress}' > ${config.pathFromRoot}/acme_server_ip")
|
||||
''}
|
||||
|
||||
${config.extraTestScript}
|
||||
'';
|
||||
};
|
||||
}
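Since the custom `fileset` type above merges definitions with `fileset.unions`, several modules can each declare only the files they need and the copied source ends up being their union; a minimal sketch under that assumption (the paths are illustrative):

# Sketch: two modules each contribute files; the merged option value is their union.
{ lib, ... }:
{
  imports = [
    { sourceFileset = lib.fileset.unions [ ../../data-model.nix ]; }
    { sourceFileset = lib.fileset.unions [ ./deployment.nix ]; }
  ];
  # the merged result is equivalent to: lib.fileset.unions [ ../../data-model.nix ./deployment.nix ]
}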
|
deployment/check/data-model/constants.nix (new file, 7 lines)
|
@@ -0,0 +1,7 @@
{
  targetMachines = [
    "hello"
  ];
  pathToRoot = ../../..;
  pathFromRoot = ./.;
}
|
deployment/check/data-model/default.nix (new file, 16 lines)
|
@@ -0,0 +1,16 @@
{
  runNixOSTest,
  inputs,
  sources,
}:

runNixOSTest {
  imports = [
    ../../data-model.nix
    ../../function.nix
    ./common-nixosTest.nix
    ./nixosTest.nix
  ];
  _module.args = { inherit inputs sources; };
  inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}
|
deployment/check/data-model/deployment.nix (new file, 53 lines)
|
@ -0,0 +1,53 @@
|
|||
{
|
||||
inputs,
|
||||
# sources,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
# inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
|
||||
eval =
|
||||
module:
|
||||
(lib.evalModules {
|
||||
specialArgs = {
|
||||
inherit inputs;
|
||||
};
|
||||
modules = [
|
||||
module
|
||||
../../data-model.nix
|
||||
];
|
||||
}).config;
|
||||
fediversity = eval (
|
||||
{ ... }:
|
||||
{
|
||||
config = {
|
||||
environments.single-nixos-vm =
|
||||
{ ... }:
|
||||
{
|
||||
implementation = requests: {
|
||||
input = requests;
|
||||
output.ssh-host = {
|
||||
ssh = {
|
||||
host = "localhost";
|
||||
username = "root";
|
||||
authentication.password = "password";
|
||||
};
|
||||
nixos-configuration =
|
||||
{ ... }:
|
||||
{
|
||||
users.users = config.resources.shell.login-shell.apply (
|
||||
lib.filterAttrs (_name: value: value ? login-shell) requests
|
||||
);
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
in
|
||||
fediversity.environments.single-nixos-vm.deployment {
|
||||
enable = true;
|
||||
}
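Evaluating this file produces a value of the `ssh-host` alternative of the `deployment` type declared in `deployment/data-model.nix` (shown later in this diff); roughly, and with the NixOS module elided, the result looks like this sketch:

# Approximate shape of the evaluated deployment (sketch, not verbatim output)
{
  ssh-host = {
    ssh = {
      host = "localhost";
      username = "root";
      authentication.password = "password";
    };
    nixos-configuration =
      { ... }:
      {
        # users.users derived from the login-shell requests, as above
      };
  };
}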
|
deployment/check/data-model/nixosTest.nix (new file, 151 lines)
|
@ -0,0 +1,151 @@
|
|||
{
|
||||
lib,
|
||||
config,
|
||||
sources,
|
||||
...
|
||||
}:
|
||||
let
|
||||
inherit (import ./constants.nix) targetMachines pathToRoot;
|
||||
in
|
||||
{
|
||||
_class = "nixosTest";
|
||||
|
||||
name = "deployment-model";
|
||||
|
||||
sourceFileset = lib.fileset.unions [
|
||||
../../data-model.nix
|
||||
../../function.nix
|
||||
./constants.nix
|
||||
./deployment.nix
|
||||
];
|
||||
|
||||
nodes.deployer =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
jq
|
||||
automake
|
||||
autoconf
|
||||
];
|
||||
|
||||
# FIXME: sad times
|
||||
system.extraDependencies = with pkgs; [
|
||||
jq
|
||||
jq.inputDerivation
|
||||
automake
|
||||
autoconf
|
||||
];
|
||||
|
||||
system.extraDependenciesFromModule =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
hello
|
||||
cowsay
|
||||
automake
|
||||
autoconf
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
extraTestScript = ''
|
||||
with subtest("Check the status before deployment"):
|
||||
hello.fail("hello 1>&2")
|
||||
|
||||
${lib.concatStringsSep "\n" (
|
||||
lib.lists.map (nodeName: ''
|
||||
with subtest("Run the deployment for ${nodeName}"):
|
||||
deployer.succeed("""
|
||||
set -euo pipefail
|
||||
|
||||
# INSTANTIATE
|
||||
command=(
|
||||
nix-instantiate
|
||||
--expr
|
||||
|
||||
'
|
||||
let
|
||||
args = builtins.fromJSON "${
|
||||
lib.replaceStrings [ "\"" ] [ "\\\\\"" ] (
|
||||
lib.strings.toJSON {
|
||||
inherit sources;
|
||||
}
|
||||
)
|
||||
}";
|
||||
inherit (args) sources;
|
||||
configuration = { pkgs, config, ... }: {
|
||||
imports = [
|
||||
${pathToRoot}/deployment/check/common/sharedOptions.nix
|
||||
${pathToRoot}/deployment/check/common/targetNode.nix
|
||||
${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix
|
||||
];
|
||||
|
||||
enableAcme = ${lib.strings.toJSON config.enableAcme};
|
||||
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
hello
|
||||
automake
|
||||
autoconf
|
||||
];
|
||||
};
|
||||
eval = import "${sources.nixpkgs}/nixos/lib/eval-config.nix" {
|
||||
system = builtins.currentSystem;
|
||||
specialArgs = {
|
||||
inherit sources;
|
||||
};
|
||||
modules = [ configuration ];
|
||||
};
|
||||
os = {
|
||||
inherit (eval) pkgs config options;
|
||||
system = eval.config.system.build.toplevel;
|
||||
inherit (eval.config.system.build) vm vmWithBootLoader;
|
||||
};
|
||||
in
|
||||
# import "${pathToRoot}/deployment/nixos.nix" {}
|
||||
{
|
||||
drv_path = os.config.system.build.toplevel.drvPath;
|
||||
out_path = os.config.system.build.toplevel;
|
||||
}
|
||||
'
|
||||
)
|
||||
# instantiate the config in /nix/store
|
||||
"''${command[@]}" -A out_path
|
||||
# get the other info
|
||||
json="$("''${command[@]}" --eval --strict --json)"
|
||||
|
||||
# DEPLOY
|
||||
declare drv_path
|
||||
# set our variables using the json object
|
||||
eval "export $(echo $json | jq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
|
||||
host="root@${nodeName}"
|
||||
sshOpts=(
|
||||
-o BatchMode=yes
|
||||
-o StrictHostKeyChecking=no
|
||||
-o "ConnectionAttempts=1"
|
||||
-o "ConnectTimeout=1"
|
||||
-o "ServerAliveCountMax=1"
|
||||
-o "ServerAliveInterval=1"
|
||||
)
|
||||
# get the realized derivation to deploy
|
||||
outPath=$(nix-store --realize "$drv_path")
|
||||
# deploy the config by nix-copy-closure
|
||||
NIX_SSHOPTS="''${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
|
||||
# switch the remote host to the config
|
||||
output=$(ssh "''${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; nohup $outPath/bin/switch-to-configuration switch &" 2>&1) || echo "status code: $?"
|
||||
echo "output: $output"
|
||||
if [[ $output != *"Timeout, server ${nodeName} not responding"* ]]; then
|
||||
echo "non-timeout error: $output"
|
||||
exit 1
|
||||
else
|
||||
exit 0
|
||||
fi
|
||||
""")
|
||||
${nodeName}.wait_for_unit("multi-user.target")
|
||||
${nodeName}.succeed("systemctl is-active sshd")
|
||||
${nodeName}.succeed("${nodeName} 1>&2")
|
||||
'') targetMachines
|
||||
)}
|
||||
'';
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
let
|
||||
inherit (import ../default.nix { }) pkgs inputs;
|
||||
inherit (pkgs) lib;
|
||||
inherit (lib) mkOption;
|
||||
inherit (lib) mkOption types;
|
||||
eval =
|
||||
module:
|
||||
(lib.evalModules {
|
||||
|
@ -13,17 +13,82 @@ let
|
|||
./data-model.nix
|
||||
];
|
||||
}).config;
|
||||
inherit (inputs.nixops4.lib) mkDeployment;
|
||||
in
|
||||
{
|
||||
_class = "nix-unit";
|
||||
|
||||
test-eval = {
|
||||
/**
|
||||
This tests a very simple arrangement that features all ingredients of the Fediversity business logic:
|
||||
application, resource, environment, deployment; and wires it all up in one end-to-end exercise.
|
||||
- The dummy resource is a login shell made available for some user.
|
||||
- The dummy application is `hello` that requires a shell to be deployed.
|
||||
- The dummy environment is a single NixOS VM that hosts one login shell, for the operator.
|
||||
- The dummy configuration enables the `hello` application.
|
||||
This will produce a NixOps4 deployment for a NixOS VM with a login shell for the operator and `hello` available.
|
||||
*/
|
||||
expr =
|
||||
let
|
||||
fediversity = eval (
|
||||
{ config, ... }:
|
||||
{ config, options, ... }:
|
||||
{
|
||||
config = {
|
||||
resources.login-shell = {
|
||||
description = "The operator needs to be able to log into the shell";
|
||||
request =
|
||||
{ ... }:
|
||||
{
|
||||
_class = "fediversity-resource-request";
|
||||
options = {
|
||||
wheel = mkOption {
|
||||
description = "Whether the login user needs root permissions";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
packages = mkOption {
|
||||
description = "Packages that need to be available in the user environment";
|
||||
type = with types; attrsOf package;
|
||||
};
|
||||
};
|
||||
};
|
||||
policy =
|
||||
{ config, ... }:
|
||||
{
|
||||
_class = "fediversity-resource-policy";
|
||||
options = {
|
||||
username = mkOption {
|
||||
description = "Username for the operator";
|
||||
type = types.str; # TODO: use the proper constraints from NixOS
|
||||
};
|
||||
wheel = mkOption {
|
||||
description = "Whether to allow login with root permissions";
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
};
|
||||
config = {
|
||||
resource-type = types.raw; # TODO: splice out the user type from NixOS
|
||||
apply =
|
||||
requests:
|
||||
let
|
||||
# Filter out requests that need wheel if policy doesn't allow it
|
||||
validRequests = lib.filterAttrs (
|
||||
_name: req: !req.login-shell.wheel || config.wheel
|
||||
) requests.resources;
|
||||
in
|
||||
lib.optionalAttrs (validRequests != { }) {
|
||||
${config.username} = {
|
||||
isNormalUser = true;
|
||||
packages =
|
||||
with lib;
|
||||
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
|
||||
extraGroups = lib.optional config.wheel "wheel";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
applications.hello =
|
||||
{ ... }:
|
||||
{
|
||||
|
@ -31,15 +96,42 @@ in
|
|||
module =
|
||||
{ ... }:
|
||||
{
|
||||
options = {
|
||||
enable = lib.mkEnableOption "Hello in the shell";
|
||||
options.enable = lib.mkEnableOption "Hello in the shell";
|
||||
};
|
||||
implementation = cfg: {
|
||||
input = cfg;
|
||||
output = lib.optionalAttrs cfg.enable {
|
||||
resources.hello.login-shell.packages.hello = pkgs.hello;
|
||||
};
|
||||
};
|
||||
};
|
||||
environments.single-nixos-vm =
|
||||
{ config, ... }:
|
||||
{
|
||||
resources.operator-environment.login-shell.username = "operator";
|
||||
implementation = requests: {
|
||||
input = requests;
|
||||
output.nixops4 =
|
||||
{ providers, ... }:
|
||||
{
|
||||
providers = {
|
||||
inherit (inputs.nixops4.modules.nixops4Provider) local;
|
||||
};
|
||||
resources.the-machine = {
|
||||
type = providers.local.exec;
|
||||
imports = [
|
||||
inputs.nixops4-nixos.modules.nixops4Resource.nixos
|
||||
];
|
||||
nixos.module =
|
||||
{ ... }:
|
||||
{
|
||||
users.users = config.resources.shell.login-shell.apply (
|
||||
lib.filterAttrs (_name: value: value ? login-shell) requests
|
||||
);
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
implementation =
|
||||
cfg:
|
||||
lib.optionalAttrs cfg.enable {
|
||||
dummy.login-shell.packages.hello = pkgs.hello;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
options = {
|
||||
|
@ -51,20 +143,64 @@ in
|
|||
applications.hello.enable = true;
|
||||
};
|
||||
};
|
||||
example-deployment = mkOption {
|
||||
type = options.deployments.nestedType;
|
||||
readOnly = true;
|
||||
default = config.environments.single-nixos-vm.deployment config.example-configuration;
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
resources = fediversity.applications.hello.resources fediversity.example-configuration.applications.hello;
|
||||
hello-shell = resources.resources.hello.login-shell;
|
||||
environment = fediversity.environments.single-nixos-vm.resources.operator-environment.login-shell;
|
||||
result = mkDeployment {
|
||||
modules = [
|
||||
(fediversity.environments.single-nixos-vm.deployment fediversity.example-configuration)
|
||||
];
|
||||
};
|
||||
|
||||
in
|
||||
{
|
||||
inherit (fediversity)
|
||||
example-configuration
|
||||
;
|
||||
number-of-resources = with lib; length (attrNames fediversity.resources);
|
||||
inherit (fediversity) example-configuration;
|
||||
hello-package-exists = hello-shell.packages ? hello;
|
||||
wheel-required = hello-shell.wheel;
|
||||
wheel-allowed = environment.wheel;
|
||||
operator-shell =
|
||||
let
|
||||
operator = (environment.apply resources).operator;
|
||||
in
|
||||
{
|
||||
inherit (operator) isNormalUser;
|
||||
packages = map (p: "${p.pname}") operator.packages;
|
||||
extraGroups = operator.extraGroups;
|
||||
};
|
||||
deployment = {
|
||||
inherit (result) _type;
|
||||
deploymentFunction = lib.isFunction result.deploymentFunction;
|
||||
getProviders = lib.isFunction result.getProviders;
|
||||
};
|
||||
};
|
||||
expected = {
|
||||
number-of-resources = 1;
|
||||
example-configuration = {
|
||||
enable = true;
|
||||
applications.hello.enable = true;
|
||||
};
|
||||
hello-package-exists = true;
|
||||
wheel-required = false;
|
||||
wheel-allowed = false;
|
||||
operator-shell = {
|
||||
isNormalUser = true;
|
||||
packages = [ "hello" ];
|
||||
extraGroups = [ ];
|
||||
};
|
||||
deployment = {
|
||||
_type = "nixops4Deployment";
|
||||
deploymentFunction = true;
|
||||
getProviders = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
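Condensed, the flow this test exercises can be read as the following sketch (names are taken from the test above; the value shown in the trailing comment matches the `expected` attribute set):

let
  # the application's resource mapping turns its configuration into resource requests…
  requests = fediversity.applications.hello.resources { enable = true; };
  # …and the environment's login-shell policy maps those requests to NixOS users
  users =
    fediversity.environments.single-nixos-vm.resources.operator-environment.login-shell.apply
      requests;
in
users
# ≈ { operator = { isNormalUser = true; packages = [ pkgs.hello ]; extraGroups = [ ]; }; }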
|
||||
|
|
|
@ -6,28 +6,118 @@
|
|||
let
|
||||
inherit (lib) mkOption types;
|
||||
inherit (lib.types)
|
||||
attrsOf
|
||||
attrTag
|
||||
attrsOf
|
||||
deferredModuleWith
|
||||
submodule
|
||||
optionType
|
||||
functionTo
|
||||
nullOr
|
||||
optionType
|
||||
raw
|
||||
str
|
||||
submodule
|
||||
;
|
||||
|
||||
functionType = import ./function.nix;
|
||||
application-resources = {
|
||||
application-resources = submodule {
|
||||
options.resources = mkOption {
|
||||
# TODO: maybe transpose, and group the resources by type instead
|
||||
type = attrsOf (
|
||||
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
|
||||
attrTag (
|
||||
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.request; }) config.resources
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
nixos-configuration = mkOption {
|
||||
description = "A NixOS configuration.";
|
||||
type = raw;
|
||||
};
|
||||
host-ssh = mkOption {
|
||||
description = "SSH connection info to connect to a single host.";
|
||||
type = submodule {
|
||||
options = {
|
||||
host = mkOption {
|
||||
description = "the host to access by SSH";
|
||||
type = str;
|
||||
};
|
||||
username = mkOption {
|
||||
description = "the SSH user to use";
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
};
|
||||
authentication = mkOption {
|
||||
description = "authentication method";
|
||||
type = attrTag {
|
||||
private-key = mkOption {
|
||||
description = "path to the user's SSH private key";
|
||||
type = str;
|
||||
example = "/root/.ssh/id_ed25519";
|
||||
};
|
||||
password = mkOption {
|
||||
description = "SSH password";
|
||||
# TODO: mark as sensitive
|
||||
type = str;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
deployment = attrTag {
|
||||
ssh-host = {
|
||||
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
|
||||
type = submodule {
|
||||
options = {
|
||||
inherit nixos-configuration;
|
||||
ssh = host-ssh;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
_class = "nixops4Deployment";
|
||||
|
||||
options = {
|
||||
resources = mkOption {
|
||||
description = "Collection of deployment resources that can be required by applications and policed by hosting providers";
|
||||
type = attrsOf (
|
||||
submodule (
|
||||
{ ... }:
|
||||
{
|
||||
_class = "fediversity-resource";
|
||||
options = {
|
||||
description = mkOption {
|
||||
description = "Description of the resource to help application module authors and hosting providers to work with it";
|
||||
type = types.str;
|
||||
};
|
||||
request = mkOption {
|
||||
description = "Options for declaring resource requirements by an application, a description of how the resource is consumed or accessed";
|
||||
type = deferredModuleWith { staticModules = [ { _class = "fediversity-resource-request"; } ]; };
|
||||
};
|
||||
policy = mkOption {
|
||||
description = "Options for configuring the resource policy for the hosting provider, a description of how the resource is made available";
|
||||
type = deferredModuleWith {
|
||||
staticModules = [
|
||||
(policy: {
|
||||
_class = "fediversity-resource-policy";
|
||||
options.resource-type = mkOption {
|
||||
description = "The type of resource this policy configures";
|
||||
type = types.optionType;
|
||||
};
|
||||
# TODO(@fricklerhandwerk): we may want to make the function type explict here: `request -> resource-type`
|
||||
# and then also rename this to be consistent with the application's resource mapping
|
||||
options.apply = mkOption {
|
||||
description = "Apply the policy to a request";
|
||||
type = functionTo policy.config.resource-type;
|
||||
};
|
||||
})
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
applications = mkOption {
|
||||
description = "Collection of Fediversity applications";
|
||||
type = attrsOf (
|
||||
|
@ -52,12 +142,13 @@ in
|
|||
readOnly = true;
|
||||
default = input: (application.config.implementation input).output;
|
||||
};
|
||||
# TODO(@fricklerhandwerk): this needs a better name, it's just the type
|
||||
config-mapping = mkOption {
|
||||
description = "Function type for the mapping from application configuration to required resources";
|
||||
type = submodule functionType;
|
||||
readOnly = true;
|
||||
default = {
|
||||
input-type = application.config.module;
|
||||
input-type = submodule application.config.module;
|
||||
output-type = application-resources;
|
||||
};
|
||||
};
|
||||
|
@ -65,6 +156,60 @@ in
|
|||
})
|
||||
);
|
||||
};
|
||||
environments = mkOption {
|
||||
description = "Run-time environments for Fediversity applications to be deployed to";
|
||||
type = attrsOf (
|
||||
submodule (environment: {
|
||||
_class = "fediversity-environment";
|
||||
options = {
|
||||
resources = mkOption {
|
||||
description = ''
|
||||
Resources made available by the hosting provider, and their policies.
|
||||
|
||||
Setting this is optional, but provides a place to declare that information for programmatic use in the resource mapping.
|
||||
'';
|
||||
# TODO: maybe transpose, and group the resources by type instead
|
||||
type = attrsOf (
|
||||
attrTag (
|
||||
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.policy; }) config.resources
|
||||
)
|
||||
);
|
||||
};
|
||||
implementation = mkOption {
|
||||
description = "Mapping of resources required by applications to available resources; the result can be deployed";
|
||||
type = environment.config.resource-mapping.function-type;
|
||||
};
|
||||
resource-mapping = mkOption {
|
||||
description = "Function type for the mapping from resources to a deployment";
|
||||
type = submodule functionType;
|
||||
readOnly = true;
|
||||
default = {
|
||||
input-type = application-resources;
|
||||
output-type = deployment;
|
||||
};
|
||||
};
|
||||
# TODO(@fricklerhandwerk): maybe this should be a separate thing such as `fediversity-setup`,
|
||||
# which makes explicit which applications and environments are available.
|
||||
# then the deployments can simply be the result of the function application baked into this module.
|
||||
deployment = mkOption {
|
||||
description = "Generate a deployment from a configuration, by applying an environment's resource policies to the applications' resource mappings";
|
||||
type = functionTo (environment.config.resource-mapping.output-type);
|
||||
readOnly = true;
|
||||
default =
|
||||
cfg:
|
||||
# TODO: check cfg.enable.true
|
||||
let
|
||||
required-resources = lib.mapAttrs (
|
||||
name: application-settings: config.applications.${name}.resources application-settings
|
||||
) cfg.applications;
|
||||
in
|
||||
(environment.config.implementation required-resources).output;
|
||||
|
||||
};
|
||||
};
|
||||
})
|
||||
);
|
||||
};
|
||||
configuration = mkOption {
|
||||
description = "Configuration type declaring options to be set by operators";
|
||||
type = optionType;
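The `host-ssh` option earlier in this file uses an `attrTag` for `authentication`, so each host specifies exactly one of the alternatives; two illustrative values (the host name and key path are made up):

{
  # key-based access
  with-key = {
    host = "fedi201.abundos.eu";
    username = "root";
    authentication.private-key = "/root/.ssh/id_ed25519";
  };
  # password-based access, as used by the data-model check in this change set
  with-password = {
    host = "localhost";
    username = "root";
    authentication.password = "password";
  };
}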
|
||||
|
|
|
@ -21,6 +21,11 @@
|
|||
inherit (pkgs.testers) runNixOSTest;
|
||||
inherit inputs sources;
|
||||
};
|
||||
|
||||
deployment-model = import ./check/data-model {
|
||||
inherit (pkgs.testers) runNixOSTest;
|
||||
inherit inputs sources;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
let
|
||||
inherit (lib) mkOption types;
|
||||
inherit (types)
|
||||
deferredModule
|
||||
submodule
|
||||
functionTo
|
||||
optionType
|
||||
|
@ -14,10 +13,10 @@ in
|
|||
{
|
||||
options = {
|
||||
input-type = mkOption {
|
||||
type = deferredModule;
|
||||
type = optionType;
|
||||
};
|
||||
output-type = mkOption {
|
||||
type = deferredModule;
|
||||
type = optionType;
|
||||
};
|
||||
function-type = mkOption {
|
||||
type = optionType;
|
||||
|
@ -25,10 +24,10 @@ in
|
|||
default = functionTo (submodule {
|
||||
options = {
|
||||
input = mkOption {
|
||||
type = submodule config.input-type;
|
||||
type = config.input-type;
|
||||
};
|
||||
output = mkOption {
|
||||
type = submodule config.output-type;
|
||||
type = config.output-type;
|
||||
};
|
||||
};
|
||||
});
|
||||
|
|
deployment/nixos.nix (new file, 14 lines)
|
@@ -0,0 +1,14 @@
{
  configuration,
  system ? builtins.currentSystem,
}:
let
  sources = import ../npins;
  os = import "${sources.nixpkgs}/nixos" { inherit system configuration; };
in
{
  substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
  trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
  drv_path = os.config.system.build.toplevel.drvPath;
  out_path = os.config.system.build.toplevel;
}
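A rough usage sketch for this helper (the inline configuration is illustrative; any NixOS configuration module works):

let
  result = import ./deployment/nixos.nix {
    configuration =
      { ... }:
      {
        # minimal illustrative settings so the toplevel evaluates
        boot.loader.grub.enable = false;
        fileSystems."/".device = "/dev/vda1";
      };
  };
in
result.out_path # the system's toplevel derivation; drv_path, substituters, trusted_public_keys are also exposed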
|
|
@ -20,16 +20,13 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
proxmox = mkOption {
|
||||
type = types.nullOr (
|
||||
types.enum [
|
||||
"procolix"
|
||||
"fediversity"
|
||||
]
|
||||
);
|
||||
isFediversityVm = mkOption {
|
||||
type = types.bool;
|
||||
description = ''
|
||||
The Proxmox instance. This is used for provisioning only and should be
|
||||
set to `null` if the machine is not a VM.
|
||||
Whether the machine is a Fediversity VM or not. This is used to
|
||||
determine whether the machine should be provisioned via Proxmox or not.
|
||||
Machines that are _not_ Fediversity VMs could be physical machines, or
|
||||
VMs that live outside Fediversity, eg. on Procolix's Proxmox.
|
||||
'';
|
||||
};
|
||||
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
{ sources, ... }:
|
||||
{ ... }:
|
||||
|
||||
{
|
||||
_class = "nixos";
|
||||
|
||||
imports = [
|
||||
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
|
||||
];
|
||||
## FIXME: It would be nice, but the following leads to infinite recursion
|
||||
## in the way we currently plug `sources` in.
|
||||
##
|
||||
# imports = [
|
||||
# "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
|
||||
# ];
|
||||
|
||||
boot = {
|
||||
initrd = {
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
inputs,
|
||||
lib,
|
||||
config,
|
||||
sources,
|
||||
keys,
|
||||
secrets,
|
||||
...
|
||||
|
@ -33,10 +32,9 @@ in
|
|||
## should go into the `./nixos` subdirectory.
|
||||
nixos.module = {
|
||||
imports = [
|
||||
"${sources.agenix}/modules/age.nix"
|
||||
"${sources.disko}/module.nix"
|
||||
./options.nix
|
||||
./nixos
|
||||
./proxmox-qemu-vm.nix
|
||||
];
|
||||
|
||||
## Inject the shared options from the resource's `config` into the NixOS
|
||||
|
|
|
@ -14,90 +14,55 @@ let
|
|||
mkOption
|
||||
evalModules
|
||||
filterAttrs
|
||||
mapAttrs'
|
||||
deepSeq
|
||||
;
|
||||
inherit (lib.attrsets) genAttrs;
|
||||
|
||||
## Given a machine's name and whether it is a test VM, make a resource module,
|
||||
## except for its missing provider. (Depending on the use of that resource, we
|
||||
## will provide a different one.)
|
||||
makeResourceModule =
|
||||
{ vmName, isTestVm }:
|
||||
{
|
||||
nixos.module.imports = [
|
||||
./common/proxmox-qemu-vm.nix
|
||||
];
|
||||
|
||||
nixos.specialArgs = {
|
||||
inherit
|
||||
sources
|
||||
inputs
|
||||
keys
|
||||
secrets
|
||||
;
|
||||
};
|
||||
|
||||
imports =
|
||||
[
|
||||
./common/resource.nix
|
||||
]
|
||||
++ (
|
||||
if isTestVm then
|
||||
[
|
||||
../machines/operator/${vmName}
|
||||
{
|
||||
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
|
||||
# allow our panel vm access to the test machines
|
||||
keys.panel
|
||||
];
|
||||
}
|
||||
]
|
||||
else
|
||||
[
|
||||
../machines/dev/${vmName}
|
||||
]
|
||||
);
|
||||
fediversityVm.name = vmName;
|
||||
commonResourceModule = {
|
||||
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
|
||||
# flake-parts and have our own data model for how the project is organised
|
||||
# internally
|
||||
_module.args = {
|
||||
inherit
|
||||
inputs
|
||||
keys
|
||||
secrets
|
||||
sources
|
||||
;
|
||||
};
|
||||
|
||||
## FIXME: It would be preferable to have those `sources`-related imports in
|
||||
## the modules that use them. However, doing so triggers infinite recursions
|
||||
## because of the way we propagate `sources`. `sources` must be propagated by
|
||||
## means of `specialArgs`, but this requires a bigger change.
|
||||
nixos.module.imports = [
|
||||
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
|
||||
"${sources.agenix}/modules/age.nix"
|
||||
"${sources.disko}/module.nix"
|
||||
"${sources.home-manager}/nixos"
|
||||
];
|
||||
|
||||
imports = [
|
||||
./common/resource.nix
|
||||
];
|
||||
};
|
||||
|
||||
## Given a list of machine names, make a deployment with those machines'
|
||||
## configurations as resources.
|
||||
makeDeployment =
|
||||
vmNames:
|
||||
{ providers, ... }:
|
||||
{
|
||||
# XXX: this type merge is for adding `specialArgs` to resource modules
|
||||
options.resources = mkOption {
|
||||
type =
|
||||
with lib.types;
|
||||
lazyAttrsOf (submoduleWith {
|
||||
class = "nixops4Resource";
|
||||
modules = [ ];
|
||||
# TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs`
|
||||
# once we're sure it's sane. leaving it here for better control during refactoring.
|
||||
specialArgs = {
|
||||
inherit
|
||||
sources
|
||||
inputs
|
||||
keys
|
||||
secrets
|
||||
|
||||
;
|
||||
};
|
||||
});
|
||||
};
|
||||
config = {
|
||||
providers.local = inputs.nixops4.modules.nixops4Provider.local;
|
||||
resources = genAttrs vmNames (vmName: {
|
||||
type = providers.local.exec;
|
||||
imports = [
|
||||
inputs.nixops4-nixos.modules.nixops4Resource.nixos
|
||||
(makeResourceModule {
|
||||
inherit vmName;
|
||||
isTestVm = false;
|
||||
})
|
||||
];
|
||||
});
|
||||
};
|
||||
providers.local = inputs.nixops4.modules.nixops4Provider.local;
|
||||
resources = genAttrs vmNames (vmName: {
|
||||
type = providers.local.exec;
|
||||
imports = [
|
||||
inputs.nixops4-nixos.modules.nixops4Resource.nixos
|
||||
commonResourceModule
|
||||
../machines/dev/${vmName}
|
||||
];
|
||||
});
|
||||
};
|
||||
makeDeployment' = vmName: makeDeployment [ vmName ];
|
||||
|
||||
|
@ -112,21 +77,29 @@ let
|
|||
fediversity = import ../services/fediversity;
|
||||
}
|
||||
{
|
||||
garageConfigurationResource = makeResourceModule {
|
||||
vmName = "test01";
|
||||
isTestVm = true;
|
||||
garageConfigurationResource = {
|
||||
imports = [
|
||||
commonResourceModule
|
||||
../machines/operator/test01
|
||||
];
|
||||
};
|
||||
mastodonConfigurationResource = makeResourceModule {
|
||||
vmName = "test06"; # somehow `test02` has a problem - use test06 instead
|
||||
isTestVm = true;
|
||||
mastodonConfigurationResource = {
|
||||
imports = [
|
||||
commonResourceModule
|
||||
../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
|
||||
];
|
||||
};
|
||||
peertubeConfigurationResource = makeResourceModule {
|
||||
vmName = "test05";
|
||||
isTestVm = true;
|
||||
peertubeConfigurationResource = {
|
||||
imports = [
|
||||
commonResourceModule
|
||||
../machines/operator/test05
|
||||
];
|
||||
};
|
||||
pixelfedConfigurationResource = makeResourceModule {
|
||||
vmName = "test04";
|
||||
isTestVm = true;
|
||||
pixelfedConfigurationResource = {
|
||||
imports = [
|
||||
commonResourceModule
|
||||
../machines/operator/test04
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -139,54 +112,63 @@ let
|
|||
## this is only needed to expose NixOS configurations for provisioning
|
||||
## purposes, and eventually all of this should be handled by NixOps4.
|
||||
options = {
|
||||
nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong
|
||||
nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
|
||||
nixpkgs = mkOption { };
|
||||
ssh = mkOption { };
|
||||
};
|
||||
};
|
||||
|
||||
makeResourceConfig =
|
||||
vm:
|
||||
{ vmName, isTestVm }:
|
||||
(evalModules {
|
||||
modules = [
|
||||
nixops4ResourceNixosMockOptions
|
||||
(makeResourceModule vm)
|
||||
commonResourceModule
|
||||
(if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
|
||||
];
|
||||
}).config;
|
||||
|
||||
## Given a VM name, make a NixOS configuration for this machine.
|
||||
makeConfiguration =
|
||||
isTestVm: vmName:
|
||||
let
|
||||
inherit (sources) nixpkgs;
|
||||
in
|
||||
import "${nixpkgs}/nixos" {
|
||||
modules = [
|
||||
(makeResourceConfig { inherit vmName isTestVm; }).nixos.module
|
||||
];
|
||||
import "${sources.nixpkgs}/nixos" {
|
||||
configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
|
||||
system = "x86_64-linux";
|
||||
};
|
||||
|
||||
makeVmOptions = isTestVm: vmName: {
|
||||
inherit ((makeResourceConfig { inherit vmName isTestVm; }).fediversityVm)
|
||||
proxmox
|
||||
vmId
|
||||
description
|
||||
|
||||
sockets
|
||||
cores
|
||||
memory
|
||||
diskSize
|
||||
|
||||
hostPublicKey
|
||||
unsafeHostPrivateKey
|
||||
;
|
||||
};
|
||||
makeVmOptions =
|
||||
isTestVm: vmName:
|
||||
let
|
||||
config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
|
||||
in
|
||||
if config.isFediversityVm then
|
||||
{
|
||||
inherit (config)
|
||||
vmId
|
||||
description
|
||||
sockets
|
||||
cores
|
||||
memory
|
||||
diskSize
|
||||
hostPublicKey
|
||||
unsafeHostPrivateKey
|
||||
;
|
||||
}
|
||||
else
|
||||
null;
|
||||
|
||||
listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));
|
||||
|
||||
machines = listSubdirectories ../machines/dev;
|
||||
testMachines = listSubdirectories ../machines/operator;
|
||||
|
||||
nixosConfigurations =
|
||||
genAttrs machines (makeConfiguration false)
|
||||
// genAttrs testMachines (makeConfiguration true);
|
||||
vmOptions =
|
||||
filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
|
||||
(genAttrs machines (makeVmOptions false) // genAttrs testMachines (makeVmOptions true));
|
||||
|
||||
in
|
||||
{
|
||||
_class = "flake";
|
||||
|
@ -210,10 +192,23 @@ in
|
|||
)
|
||||
);
|
||||
};
|
||||
flake.nixosConfigurations =
|
||||
genAttrs machines (makeConfiguration false)
|
||||
// genAttrs testMachines (makeConfiguration true);
|
||||
flake.vmOptions =
|
||||
genAttrs machines (makeVmOptions false)
|
||||
// genAttrs testMachines (makeVmOptions true);
|
||||
flake = { inherit nixosConfigurations vmOptions; };
|
||||
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
checks =
|
||||
mapAttrs' (name: nixosConfiguration: {
|
||||
name = "nixosConfigurations-${name}";
|
||||
value = nixosConfiguration.config.system.build.toplevel;
|
||||
}) nixosConfigurations
|
||||
// mapAttrs' (name: vmOptions: {
|
||||
name = "vmOptions-${name}";
|
||||
## Check that VM options build/evaluate correctly. `deepSeq e1
|
||||
## e2` evaluates `e1` strictly in depth before returning `e2`. We
|
||||
## use this trick because checks need to be derivations, which VM
|
||||
## options are not.
|
||||
value = deepSeq vmOptions pkgs.hello;
|
||||
}) vmOptions;
|
||||
};
|
||||
}
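The comment above relies on `deepSeq` forcing complete evaluation; a one-line illustration of why a broken set of VM options then fails the corresponding check at evaluation time (the attribute names are made up):

# deepSeq evaluates its first argument in depth before returning the second,
# so any evaluation error hidden inside the options surfaces here:
builtins.deepSeq { vmId = 7001; broken = throw "missing option"; } pkgs.hello
# => error: missing option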
|
||||
|
|
|
@ -179,15 +179,9 @@ grab_vm_options () {
|
|||
--log-format raw --quiet
|
||||
)
|
||||
|
||||
proxmox=$(echo "$options" | jq -r .proxmox)
|
||||
vm_id=$(echo "$options" | jq -r .vmId)
|
||||
description=$(echo "$options" | jq -r .description)
|
||||
|
||||
if [ "$proxmox" != fediversity ]; then
|
||||
die "I do not know how to provision things that are not Fediversity VMs,
|
||||
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
|
||||
fi
|
||||
|
||||
sockets=$(echo "$options" | jq -r .sockets)
|
||||
cores=$(echo "$options" | jq -r .cores)
|
||||
memory=$(echo "$options" | jq -r .memory)
|
||||
|
|
|
@ -167,16 +167,10 @@ grab_vm_options () {
|
|||
--log-format raw --quiet
|
||||
)
|
||||
|
||||
proxmox=$(echo "$options" | jq -r .proxmox)
|
||||
vm_id=$(echo "$options" | jq -r .vmId)
|
||||
|
||||
if [ "$proxmox" != fediversity ]; then
|
||||
die "I do not know how to remove things that are not Fediversity VMs,
|
||||
but I got proxmox = '%s' for VM %s." "$proxmox" "$vm_name"
|
||||
fi
|
||||
|
||||
printf 'done grabbing VM options for VM %s. Found VM %d on %s Proxmox.\n' \
|
||||
"$vm_name" "$vm_id" "$proxmox"
|
||||
printf 'done grabbing VM options for VM %s. Got id: %d.\n' \
|
||||
"$vm_name" "$vm_id"
|
||||
fi
|
||||
}
|
||||
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "fedi200";
|
||||
isFediversityVm = true;
|
||||
vmId = 200;
|
||||
proxmox = "fediversity";
|
||||
description = "Testing machine for Hans";
|
||||
|
||||
domain = "abundos.eu";
|
||||
|
@ -16,10 +17,4 @@
|
|||
gateway = "2a00:51c0:13:1305::1";
|
||||
};
|
||||
};
|
||||
|
||||
nixos.module = {
|
||||
imports = [
|
||||
../../../infra/common/proxmox-qemu-vm.nix
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "fedi201";
|
||||
isFediversityVm = true;
|
||||
vmId = 201;
|
||||
proxmox = "fediversity";
|
||||
description = "FediPanel";
|
||||
|
||||
domain = "abundos.eu";
|
||||
|
@ -19,7 +20,6 @@
|
|||
|
||||
nixos.module = {
|
||||
imports = [
|
||||
../../../infra/common/proxmox-qemu-vm.nix
|
||||
./fedipanel.nix
|
||||
];
|
||||
};
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
{
|
||||
config,
|
||||
sources,
|
||||
...
|
||||
}:
|
||||
let
|
||||
|
@ -11,7 +10,6 @@ in
|
|||
|
||||
imports = [
|
||||
(import ../../../panel { }).module
|
||||
"${sources.home-manager}/nixos"
|
||||
];
|
||||
|
||||
security.acme = {
|
||||
|
|
|
@ -20,7 +20,9 @@ in
|
|||
ssh.host = mkForce "forgejo-ci";
|
||||
|
||||
fediversityVm = {
|
||||
name = "forgejo-ci";
|
||||
domain = "procolix.com";
|
||||
isFediversityVm = false;
|
||||
|
||||
ipv4 = {
|
||||
interface = "enp1s0f0";
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "vm02116";
|
||||
isFediversityVm = false;
|
||||
vmId = 2116;
|
||||
proxmox = "procolix";
|
||||
description = "Forgejo";
|
||||
|
||||
ipv4.address = "185.206.232.34";
|
||||
|
@ -14,7 +15,6 @@
|
|||
{ lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
../../../infra/common/proxmox-qemu-vm.nix
|
||||
./forgejo.nix
|
||||
];
|
||||
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "vm02187";
|
||||
isFediversityVm = false;
|
||||
vmId = 2187;
|
||||
proxmox = "procolix";
|
||||
description = "Wiki";
|
||||
|
||||
ipv4.address = "185.206.232.187";
|
||||
|
@ -14,7 +15,6 @@
|
|||
{ lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
../../../infra/common/proxmox-qemu-vm.nix
|
||||
./wiki.nix
|
||||
];
|
||||
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test01";
|
||||
isFediversityVm = true;
|
||||
vmId = 7001;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test02";
|
||||
isFediversityVm = true;
|
||||
vmId = 7002;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test03";
|
||||
isFediversityVm = true;
|
||||
vmId = 7003;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test04";
|
||||
isFediversityVm = true;
|
||||
vmId = 7004;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test05";
|
||||
isFediversityVm = true;
|
||||
vmId = 7005;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test06";
|
||||
isFediversityVm = true;
|
||||
vmId = 7006;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test11";
|
||||
isFediversityVm = true;
|
||||
vmId = 7011;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test12";
|
||||
isFediversityVm = true;
|
||||
vmId = 7012;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test13";
|
||||
isFediversityVm = true;
|
||||
vmId = 7013;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
_class = "nixops4Resource";
|
||||
|
||||
fediversityVm = {
|
||||
name = "test14";
|
||||
isFediversityVm = true;
|
||||
vmId = 7014;
|
||||
proxmox = "fediversity";
|
||||
|
||||
hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
|
||||
unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
|
||||
|
|