Compare commits

...

98 commits

Author SHA1 Message Date
a8cda3388d
use token
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-21 16:54:50 +02:00
2f48cc09d8
rm debug log comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-21 15:21:29 +02:00
024c13147d
rename deployment method to prep for separation
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-21 15:09:15 +02:00
b4b535b404
rm unused options
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-21 14:45:50 +02:00
7b689b7e6f
split out TF proxmox data model
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-21 12:36:47 +02:00
490826e1fc
limit disk size
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
17e26d6426
clean some comments
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
deffe65cab
clean out comments
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
43639a903e
use insecure for test only
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
dc7298396f
dedupe block
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
2c2409f890
lower memory
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
6d2387d2ff
rename bridge
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
0bec66acc4
fix tf syntax
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
eb24a8eab4
pass ipv6
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
5acb8b55dd
add no-op second invocation
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
2bb9258c1c
rm apparently unnecessary gateway stuff
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
9e9d266052
trigger update by codebase hash
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
7e895bcd20
add fixmes
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
913c8f4ce0
rm memory size
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
6e99551b11
output id
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
05973b5d25
print to stderr
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
df456da016
todo wire credentials
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
3997a00716
rm debug key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
914ad6c65f
base-hash
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
1546a3d91c
rm comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
01d0591d08
grub default false
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
1e16178a53
pkgs.qemu
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
f4fe0d2697
unprint await steps
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
c2ca69e4c5
rename await
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
d428e44600
fix null resource commands
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
76334a73a0
make path in single host explicit
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
2c296180e6
working networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
7f6494babd
raise limits
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
c7400ba7c4
working networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
f5446db25d
include await-ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
c5060be34f
execute await-ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
a58918c67d
pass proxmox user/pw thru env var as well, should fix usage from flake again
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
eba070c21f
use jumphost for test
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
ab55a0c635
bootable disk
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
a1def5b11a
rm broken async_pf fix
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
b8bbfe690c
rm old networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
288cee25d4
users
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
0958802be8
factor out await.ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
b356c99785
add debugging key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
f3726fc988
fix verification
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
2c4b9254ef
increase timeout
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
2e6491aa56
add interface to ipv6 addresses
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
fc9f0c0bdc
don't reinstall bootloader
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
672d16a65d
allow spaces in ssh opts
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
9e9d23e0c3
interface sata2 actually exposes initialization info on the vm
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:12 +02:00
9131e007c0
verbose wait
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
dffe0fae26
comment ls echo
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
c24f5187a4
check hello application
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
ef5e943c55
restore jq package
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
ba60e49b90
enable cloud-init
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
cf313f2092
ssh key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
3b4b9b9930
rm comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
541e730921
propagate token
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
6613e89d2f
set up proxmox
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
e94c142c7a
handle CA certificate
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
ad496128ae
add packages
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
2732199230
add fixmes
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
eaa86340c9
prevent kernel panic
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
dd79dbf74c
use json format for output only
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
98d80ea88a
propagate ssh key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
1ed079e6c5
default timeouts
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
e9ee2b6a21
propagate cd
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
5ae8a3709c
use variables
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
7152d6957a
propagate ips
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
0e9a5d66df
rm ubuntu
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
1d683428a2
output: separate ips
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
c910f3fef5
apply: json
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
6612727000
un-generators
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
0b0a25a4db
propagate datastore id
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
52df5e3d0c
add pve-manager for pvesh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
cb0f343b8e
password default
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
931d280c59
upstream nimbolus
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
d5423b5a70
rm output comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
b46eb9b53a
await SSH availability, resolving a race condition
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
2f1cc9daaf
un-hardcode networking info
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
864edcb682
rm bash-proxmox
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
6b05f089c2
skip acme
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
4594385ea3
unlog steps
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:04:11 +02:00
90d64231ac
clean up unused vms
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
1463b56bc3
factor out TF setup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
a465de35e3
first time to get nix run pipeline to succeed including update step
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
30632b5a8b
hardcoded networking setup for nix run vm 101
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
b5d81118a7
mv out acme logic
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
43606cc70b
start documenting needed proxmox user privileges
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
234a28e86a
back to qcow over size difference
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
d2b2fec6f2
some cleanup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
c889546771
automated deployment to proxmox (in nix run) of vm booting to login
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
de17bc6848
simplify
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
f444429139
bootable vm by repart
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
1b6ae37a27
get nix run to build a vm bootable by qemu
```
cp /nix/store/.../nixos.img disk.raw
chmod 0644 disk.raw
qemu-system-x86_64 -enable-kvm -m 2048 -drive
if=virtio,file=./disk.raw,format=raw -bios "$(nix eval --impure --expr
'(import <nixpkgs> { }).OVMF.fd.outPath' | jq -r)/FV/OVMF.fd"
```

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
890f7174f9
WIP: proxmox deployment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>

continued

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-20 20:03:40 +02:00
9c54152439 split data model by test, preventing need to pass args for unrelated tests (#544)
Reviewed-on: fediversity/fediversity#544
2025-10-20 17:35:07 +02:00
ab1b48d2e7 factor out utils (#543)
Reviewed-on: fediversity/fediversity#543
2025-10-20 15:43:57 +02:00
40 changed files with 1881 additions and 440 deletions

View file

@ -12,7 +12,7 @@ on:
jobs:
_checks:
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-model-tf-proxmox","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
runs-on: native
steps:
- run: true
@ -53,6 +53,12 @@ jobs:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf -vL
deployment-model-tf-proxmox:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf-proxmox -L
deployment-panel:
runs-on: native
steps:

View file

@ -1,29 +0,0 @@
{
lib,
...
}:
let
inherit (lib) mkOption types;
in
{
options = {
host = mkOption {
type = types.str;
description = "name of the host to deploy to";
};
targetSystem = mkOption {
type = types.str;
description = "name of the host to deploy to";
};
sshOpts = mkOption {
description = "Extra SSH options (`-o`) to use.";
type = types.listOf types.str;
default = [ ];
example = "ConnectTimeout=60";
};
httpBackend = mkOption {
description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
type = types.attrsOf (types.either types.str types.int);
};
};
}

View file

@ -1,257 +0,0 @@
{
config,
system,
inputs ? (import ../../../default.nix { }).inputs, # XXX can't be serialized
sources ? import ../../../npins,
...
}@args:
let
# having this module's location (`self`) and (serializable) `args`, we know
# enough to make it re-call itself to extract different info elsewhere later.
# we use this to make a deployment script using the desired nixos config,
# which would otherwise not be serializable, while nix also makes it hard to
# produce its derivation to pass thru without a `nix-instantiate` call,
# which in turn would need to be passed the (unserializable) nixos config.
self = "deployment/check/common/data-model.nix";
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
deployment-config = config;
inherit (deployment-config)
nodeName
pathToRoot
targetSystem
sshOpts
httpBackend
;
inherit (lib) mkOption types;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit pkgs inputs;
};
modules = [
module
../../data-model.nix
];
}).config;
fediversity = eval (
{ config, ... }:
{
config = {
resources.login-shell = {
description = "The operator needs to be able to log into the shell";
request =
{ ... }:
{
_class = "fediversity-resource-request";
options = {
wheel = mkOption {
description = "Whether the login user needs root permissions";
type = types.bool;
default = false;
};
packages = mkOption {
description = "Packages that need to be available in the user environment";
type = with types; attrsOf package;
};
};
};
policy =
{ config, ... }:
{
_class = "fediversity-resource-policy";
options = {
username = mkOption {
description = "Username for the operator";
type = types.str; # TODO: use the proper constraints from NixOS
};
wheel = mkOption {
description = "Whether to allow login with root permissions";
type = types.bool;
default = false;
};
};
config = {
resource-type = types.raw; # TODO: splice out the user type from NixOS
apply =
requests:
let
# Filter out requests that need wheel if policy doesn't allow it
validRequests = lib.filterAttrs (
_name: req: !req.login-shell.wheel || config.wheel
) requests.resources;
in
lib.optionalAttrs (validRequests != { }) {
${config.username} = {
isNormalUser = true;
packages =
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
};
};
};
};
};
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options.enable = lib.mkEnableOption "Hello in the shell";
};
implementation = cfg: {
resources = lib.optionalAttrs cfg.enable {
hello.login-shell.packages.hello = pkgs.hello;
};
};
};
environments =
let
mkNixosConfiguration =
environment: requests:
{ ... }:
{
imports = [
./data-model-options.nix
../common/sharedOptions.nix
../common/targetNode.nix
"${nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
];
users.users = environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
in
{
single-nixos-vm-ssh = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
deployment-name,
}:
{
ssh-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
username = "root";
host = nodeName;
key-file = null;
inherit sshOpts;
};
module = self;
inherit args deployment-name;
root-path = pathToRoot;
};
};
};
single-nixos-vm-nixops4 = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
...
}:
{
nixops4 =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources.${nodeName} = {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
nixos.module = mkNixosConfiguration environment required-resources;
_module.args = { inherit inputs sources; };
inherit (deployment-config) nodeName pathToRoot pathFromRoot;
};
};
};
};
single-nixos-vm-tf = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
deployment-name,
}:
{
tf-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
username = "root";
host = nodeName;
key-file = null;
inherit sshOpts;
};
module = self;
inherit args deployment-name httpBackend;
root-path = pathToRoot;
};
};
};
};
};
options = {
"example-configuration" = mkOption {
type = config.configuration;
default = {
enable = true;
applications.hello.enable = true;
};
};
"ssh-deployment" =
let
env = config.environments."single-nixos-vm-ssh";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "ssh-deployment";
configuration = config."example-configuration";
};
};
"nixops4-deployment" =
let
env = config.environments."single-nixos-vm-nixops4";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "nixops4-deployment";
configuration = config."example-configuration";
};
};
"tf-deployment" =
let
env = config.environments."single-nixos-vm-tf";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "tf-deployment";
configuration = config."example-configuration";
};
};
};
}
);
in
fediversity

View file

@ -41,7 +41,7 @@ in
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4 * 1024;
diskSize = 4 * 1024;
diskSize = 32 * 1024;
cores = 2;
};
@ -62,6 +62,7 @@ in
sources.nixpkgs
sources.flake-inputs
sources.git-hooks
sources.disko
pkgs.stdenv
pkgs.stdenvNoCC
@ -75,6 +76,7 @@ in
machine =
(pkgs.nixos [
./targetNode.nix
../../../infra/common/nixos/repart.nix
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";

View file

@ -0,0 +1,91 @@
{
config,
pkgs,
lib,
...
}:
let
inherit (lib) mkOption types;
in
{
config = {
resources.login-shell = {
description = "The operator needs to be able to log into the shell";
request =
{ ... }:
{
_class = "fediversity-resource-request";
options = {
wheel = mkOption {
description = "Whether the login user needs root permissions";
type = types.bool;
default = false;
};
packages = mkOption {
description = "Packages that need to be available in the user environment";
type = with types; attrsOf package;
};
};
};
policy =
{ config, ... }:
{
_class = "fediversity-resource-policy";
options = {
username = mkOption {
description = "Username for the operator";
type = types.str; # TODO: use the proper constraints from NixOS
};
wheel = mkOption {
description = "Whether to allow login with root permissions";
type = types.bool;
default = false;
};
};
config = {
resource-type = types.raw; # TODO: splice out the user type from NixOS
apply =
requests:
let
# Filter out requests that need wheel if policy doesn't allow it
validRequests = lib.filterAttrs (
_name: req: !req.login-shell.wheel || config.wheel
) requests.resources;
in
lib.optionalAttrs (validRequests != { }) {
${config.username} = {
isNormalUser = true;
packages =
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
};
};
};
};
};
applications.hello =
{ ... }:
{
description = ''Command-line tool that will print "Hello, world!" on the terminal'';
module =
{ ... }:
{
options.enable = lib.mkEnableOption "Hello in the shell";
};
implementation = cfg: {
resources = lib.optionalAttrs cfg.enable {
hello.login-shell.packages.hello = pkgs.hello;
};
};
};
};
options."example-configuration" = mkOption {
type = config.configuration;
default = {
enable = true;
applications.hello.enable = true;
};
};
}

View file

@ -16,7 +16,8 @@ in
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
# FIXME uncomment this when using test over `nix run`
# (modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
@ -42,8 +43,8 @@ in
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
# Test VMs don't have a bootloader by default.
boot.loader.grub.enable = lib.mkDefault false;
}
(mkIf config.enableAcme {

View file

@ -0,0 +1,113 @@
{
pkgs,
lib,
sources ? import ../../../npins,
...
}:
{
mkNixosConfiguration =
environment: requests:
{ ... }:
{
imports = [
../common/sharedOptions.nix
# tests need this, however outside tests this (and esp its import nixos-test-base) must not be used
../common/targetNode.nix
"${sources.nixpkgs}/nixos/modules/profiles/minimal.nix"
# "${nixpkgs}/nixos/modules/profiles/perlless.nix" # failed under disko
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# systemd-repart
# ../../../infra/common/nixos/repart.nix
# disko
"${sources.disko}/module.nix"
../../../infra/common/proxmox-qemu-vm.nix
];
# # non-disko
# boot.loader.grub.enable = false;
# boot.loader.systemd-boot.enable = true;
# boot.loader.efi.efiSysMountPoint = "/boot";
# boot.loader.systemd-boot.edk2-uefi-shell.enable = true;
# boot.loader.efi.canTouchEfiVariables = true;
# # proxmox.qemuConf.bios == "ovmf";
# boot.growPartition = true;
# boot.loader.timeout = 1;
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "25.05";
services.qemuGuest.enable = true;
systemd.services.qemu-guest-agent = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
};
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = {
firewall.enable = false;
useDHCP = false;
usePredictableInterfaceNames = false;
useNetworkd = true;
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
};
security.sudo.wheelNeedsPassword = false;
nix.settings.trusted-users = [ "@wheel" ];
services.cloud-init = {
enable = true;
network.enable = true;
};
users.mutableUsers = false;
users.users =
{
root = {
# password = "password"; # cannot log in
# hashedPassword = "$y$j9T$QoArNaV2VrjPhQ6BMG1AA.$uq8jw0.g.dJwIfepqipxzeUD1ochgUs8A5QmVe4qbJ6"; # cannot log in
hashedPasswordFile = builtins.toString (
pkgs.writeText "root-password" "$y$j9T$9g0NqdBsKvQ3ETOPPB0hW.$cIiG648jgA/eVqiCPJJZtI5JYiL6oODZtKI6.lCmJA/"
);
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFZsldWMEsajYysjYsEpNvMOjO4D8L21pTrfQS1T+Hfy"
];
};
# can log in
kiara = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
# cannot log in
operator = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
}
// environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
}

View file

@ -0,0 +1,64 @@
{
config,
system,
inputs,
sources ? import ../../../npins,
...
}:
let
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../common/utils.nix { }) mkNixosConfiguration;
inherit (config)
nodeName
pathFromRoot
pathToRoot
;
in
(pkgs.callPackage ../../utils.nix { inherit inputs; }).evalModel (
{ config, ... }:
{
imports = [ ../common/model.nix ];
config = {
environments.default = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
...
}:
{
nixops4 =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources.${nodeName} = {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../common/targetResource.nix
];
nixos.module = mkNixosConfiguration environment required-resources;
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
};
};
};
};
};
options.default =
let
env = config.environments.default;
in
lib.mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "default";
configuration = config."example-configuration";
};
};
}
)

View file

@ -17,13 +17,13 @@
];
nixops4Deployments.check-deployment-model =
(import ./deployment/check/common/data-model.nix {
(import ./deployment/check/data-model-nixops4/data-model.nix {
inherit system inputs;
config = {
inherit (import ./deployment/check/data-model-nixops4/constants.nix) pathToRoot pathFromRoot;
nodeName = "nixops4";
};
})."nixops4-deployment".nixops4;
}).default.nixops4;
}
);
}

View file

@ -6,17 +6,15 @@
}:
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
../../function.nix
../common/data-model.nix
../common/data-model-options.nix
../../utils.nix
../common/model.nix
../common/utils.nix
./constants.nix
./data-model.nix
(config.pathToCwd + "/flake-under-test.nix")
];

View file

@ -0,0 +1,62 @@
{
config,
system,
sources ? import ../../../npins,
...
}@args:
let
self = "deployment/check/data-model-ssh/data-model.nix";
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../common/utils.nix { }) mkNixosConfiguration;
inherit (config)
nodeName
pathToRoot
targetSystem
sshOpts
;
in
(pkgs.callPackage ../../utils.nix { }).evalModel (
{ config, ... }:
{
imports = [ ../common/model.nix ];
config = {
environments.default = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
deployment-name,
...
}:
{
ssh-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
username = "root";
host = nodeName;
key-file = null;
inherit sshOpts;
};
module = self;
inherit args deployment-name;
root-path = pathToRoot;
};
};
};
};
options.default =
let
env = config.environments.default;
in
lib.mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "default";
configuration = config."example-configuration";
};
};
}
)

View file

@ -6,25 +6,19 @@
let
inherit (pkgs) system;
nodeName = "ssh";
deployment-config = {
inherit nodeName;
inherit (import ./constants.nix) pathToRoot;
targetSystem = system;
sshOpts = [ ];
};
deploy =
(import ../common/data-model.nix {
(import ./data-model.nix {
inherit system;
config = deployment-config;
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."ssh-deployment".ssh-host.run;
config = {
inherit nodeName;
inherit (import ./constants.nix) pathToRoot;
targetSystem = system;
sshOpts = [ ];
};
}).default.ssh-host.run;
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
@ -33,8 +27,6 @@ in
../../run/ssh-single-host/run.sh
../../../npins/default.nix
../../../npins/sources.json
../common/data-model.nix
../common/data-model-options.nix
./constants.nix
];

View file

@ -0,0 +1,10 @@
{
targetMachines = [
"pve"
];
pathToRoot = builtins.path {
path = ../../..;
name = "root";
};
pathFromRoot = "/deployment/check/data-model-tf-proxmox";
}

View file

@ -0,0 +1,90 @@
{
  config,
  system,
  sources ? import ../../../npins,
  ...
}@args:
let
  # This module's own repo-relative path. It is recorded in the deployment
  # (as `module`) together with the serializable `args`, so the generated
  # deployment script can re-import this file later to recover information
  # (like the NixOS configuration) that cannot itself be serialized.
  self = "deployment/check/data-model-tf-proxmox/data-model.nix";
  inherit (sources) nixpkgs;
  pkgs = import nixpkgs { inherit system; };
  inherit (pkgs) lib;
  inherit (pkgs.callPackage ../common/utils.nix { inherit sources; }) mkNixosConfiguration;
  # Deployment parameters supplied by the caller (see this check's
  # nixosTest.nix for the concrete values used in the test).
  inherit (config)
    nodeName
    pathToRoot
    targetSystem
    sshOpts
    httpBackend
    key-file
    node-name
    bridge
    vlanId
    imageDatastoreId
    vmDatastoreId
    cdDatastoreId
    ipv4Gateway
    ipv4Address
    ipv6Gateway
    ipv6Address
    ;
in
# Evaluate a deployment model whose default environment provisions a
# Proxmox VM via the `tf-proxmox-vm` deployment method.
(pkgs.callPackage ../../utils.nix { }).evalModel (
  { config, ... }:
  {
    imports = [ ../common/model.nix ];
    config = {
      environments.default = environment: {
        resources."operator-environment".login-shell = {
          wheel = true;
          username = "operator";
        };
        implementation =
          {
            required-resources,
            deployment-name,
          }:
          {
            tf-proxmox-vm = {
              nixos-configuration = mkNixosConfiguration environment required-resources;
              system = targetSystem;
              ssh = {
                username = "root";
                host = nodeName;
                inherit key-file sshOpts;
              };
              # self-reference: lets the run script re-import this module
              module = self;
              inherit
                args
                deployment-name
                httpBackend
                node-name
                bridge
                vlanId
                imageDatastoreId
                vmDatastoreId
                cdDatastoreId
                ipv4Gateway
                ipv4Address
                ipv6Gateway
                ipv6Address
                ;
              root-path = pathToRoot;
            };
          };
      };
    };
    options.default =
      let
        env = config.environments.default;
      in
      lib.mkOption {
        type = env.resource-mapping.output-type;
        # The deployment of the model's example configuration; consumers
        # read `.default.tf-proxmox-vm` (e.g. its `run` script).
        default = env.deployment {
          deployment-name = "default";
          configuration = config."example-configuration";
        };
      };
  }
)

View file

@ -0,0 +1,51 @@
{
  inputs,
  sources,
  system,
}:
let
  # Stable nixpkgs, extended with the packages this check needs.
  # NOTE: `overlay` below refers back to `pkgs`; this works because Nix
  # evaluates lazily, but the two bindings are mutually recursive.
  pkgs = import sources.nixpkgs-stable {
    inherit system;
    overlays = [ overlay ];
  };
  overlay = _: prev: {
    # terraform-backend is only packaged in nixpkgs-unstable; build that
    # expression against our (stable) package set.
    terraform-backend =
      prev.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/te/terraform-backend/package.nix"
        { };
    # Proxmox VE packages from the proxmox-nixos project.
    inherit
      (import "${sources.proxmox-nixos}/pkgs" {
        craneLib = pkgs.callPackage "${sources.crane}/lib" { };
        # breaks from https://github.com/NixOS/nixpkgs/commit/06b354eb2dc535c57e9b4caaa16d79168f117a26,
        # which updates libvncserver to 0.9.15, which was not yet patched at https://git.proxmox.com/?p=vncterm.git.
        inherit pkgs;
        # not so picky about version for our purposes
        pkgs-unstable = pkgs;
      })
      proxmox-ve
      pve-manager
      pve-ha-manager
      pve-qemu
      ;
  };
in
pkgs.testers.runNixOSTest {
  # Expose the pinned sources and the overlaid package set to every node.
  node.specialArgs = {
    inherit
      sources
      pkgs
      ;
  };
  imports = [
    ../../data-model.nix
    ../../function.nix
    ../common/nixosTest.nix
    ./nixosTest.nix
  ];
  _module.args = { inherit inputs sources; };
  inherit (import ./constants.nix)
    targetMachines
    pathToRoot
    pathFromRoot
    ;
}

View file

@ -0,0 +1,217 @@
{
  lib,
  pkgs,
  sources,
  ...
}:
let
  inherit (pkgs) system;
  backendPort = builtins.toString 8080;
  # Environment variables configuring the Terraform HTTP state backend
  # served by the `deployer` node (see services.terraform-backend below).
  httpBackend = rec {
    TF_HTTP_USERNAME = "basic";
    TF_HTTP_PASSWORD = "fake-secret";
    TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/project1/example";
    TF_HTTP_LOCK_ADDRESS = TF_HTTP_ADDRESS;
    TF_HTTP_UNLOCK_ADDRESS = TF_HTTP_ADDRESS;
  };
  # FIXME generate the image `nixos-generate` was to make, but now do it for a desired `-c configuration.nix` rather than whatever generic thing now
  # The tf-proxmox-vm deployment produced by this check's data model,
  # parameterized for the test network below (pve node, bridge br0).
  deployment =
    (import ./data-model.nix {
      inherit sources system;
      config = {
        inherit httpBackend;
        inherit (import ./constants.nix) pathToRoot;
        nodeName = "pve";
        targetSystem = system;
        sshOpts = [
          # reach the provisioned VM through the pve node
          "ProxyCommand=ssh -W %h:%p pve"
        ];
        key-file = "/root/.ssh/id_ed25519";
        node-name = "pve";
        bridge = "br0";
        vlanId = 0;
        imageDatastoreId = "local";
        vmDatastoreId = "local";
        cdDatastoreId = "local";
        ipv4Gateway = "192.168.10.1";
        ipv4Address = "192.168.10.236/24";
        ipv6Gateway = "";
        ipv6Address = "";
      };
    }).default.tf-proxmox-vm;
in
{
  _class = "nixosTest";
  name = "deployment-model";
  sourceFileset = lib.fileset.unions [
    ../../run/tf-proxmox-vm/run.sh
    ../../run/tf-proxmox-vm/await-ssh.sh
  ];
  # The Proxmox VE host that will run the provisioned VM.
  nodes.pve =
    { sources, ... }:
    {
      imports = [
        "${sources.proxmox-nixos}/modules/proxmox-ve"
      ];
      environment.systemPackages = [
        pkgs.jq
        pkgs.qemu
      ];
      networking.firewall.enable = false;
      networking.vlans = {
        vlan0 = {
          id = 0;
          interface = "eth0";
        };
      };
      networking.useDHCP = false;
      # br0 carries the provisioned VM's traffic; the pve node acts as
      # its IPv4 gateway and NATs it out.
      networking = {
        bridges.br0.interfaces = [ ];
        interfaces.br0.ipv4.addresses = [
          {
            address = "192.168.10.1";
            prefixLength = 24;
          }
        ];
        nat = {
          enable = true;
          internalInterfaces = [ "br0" ];
        };
      };
      boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
      users.users.root = {
        password = "mytestpw";
        # the proxmox-ve module sets hashedPasswordFile; override it so the
        # plain test password above takes effect
        hashedPasswordFile = lib.mkForce null;
      };
      # https://github.com/SaumonNet/proxmox-nixos/blob/main/modules/proxmox-ve/default.nix
      services.proxmox-ve = {
        enable = true;
        ipAddress = "192.168.1.1";
      };
      virtualisation = {
        diskSize = 5 * 1024;
        memorySize = 3 * 1024;
      };
    };
  # The node that runs Terraform/OpenTofu and hosts the HTTP state backend.
  nodes.deployer =
    { ... }:
    {
      imports = [
        ../../modules/terraform-backend
      ];
      networking.firewall.enable = false;
      nix.nixPath = [
        (lib.concatStringsSep ":" (lib.mapAttrsToList (k: v: k + "=" + v) sources))
      ];
      environment.systemPackages = [
        deployment.run
        pkgs.pve-manager
        pkgs.openssl
        pkgs.jq
        (pkgs.callPackage ../../run/tf-proxmox-vm/tf.nix { })
      ];
      # needed only when building from deployer
      system.extraDependenciesFromModule =
        { pkgs, ... }:
        {
          environment.systemPackages = with pkgs; [
            hello
          ];
        };
      # Pre-seed the deployer's store so the sandboxed test can build the
      # VM image and deployment offline.
      system.extraDependencies = [
        pkgs.ubootQemuX86
        pkgs.ubootQemuX86.inputDerivation
        pkgs.pve-qemu
        pkgs.pve-qemu.inputDerivation
        pkgs.gnu-config
        pkgs.byacc
        pkgs.stdenv
        pkgs.stdenvNoCC
        sources.nixpkgs
        pkgs.vte
      ];
      services.terraform-backend = {
        enable = true;
        settings = {
          LISTEN_ADDR = ":${backendPort}";
          KMS_KEY = "tsjxw9NjKUBUlzbTnD7orqIAdEmpGYRARvxD51jtY+o=";
        };
      };
    };
  # NOTE(review): the deploy snippet below exports
  # SSL_CERT_FILE=/tmp/pve-ca-bundle.crt, but no visible step writes that
  # file (the CA is merged into /etc/ssl/certs/ca-bundle.crt instead);
  # PROXMOX_VE_INSECURE=true presumably masks this — confirm and align.
  extraTestScript = ''
    pve.wait_for_unit("pveproxy.service")
    assert "running" in pve.succeed("pveproxy status")
    pve.succeed("mkdir -p /run/pve")
    assert "Proxmox" in pve.succeed("curl -s -i -k https://localhost:8006")
    cert = pve.succeed("cat /etc/pve/pve-root-ca.pem").strip()
    # set up proxmox
    pm_token = pve.succeed("""
      set -e
      pvesh create /pools --poolid Fediversity
      pvesh set /storage/local --content "vztmpl,rootdir,backup,snippets,import,iso,images" 1>/dev/null
      pvesh create /access/users/root@pam/token/mytoken --output-format json | jq -r .value
      pvesh set /access/acl --path "/" --token "root@pam!mytoken" --roles "Administrator"
    """).strip()
    # skip indent for EOF
    deployer.succeed(f"""
cat > /etc/ssl/certs/pve-root-ca.pem <<EOF
{cert}
EOF
mkdir -p /root/.ssh
cat > /root/.ssh/id_ed25519 <<EOF
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACBWbJXVjBLGo2MrI2LBKTbzDozuA/C9taU630EtU/h38gAAAJDAOy8uwDsv
LgAAAAtzc2gtZWQyNTUxOQAAACBWbJXVjBLGo2MrI2LBKTbzDozuA/C9taU630EtU/h38g
AAAECcF8xjLavgWePoVx45Euewsh6Kw07L6QDDy3WXFCn4bFZsldWMEsajYysjYsEpNvMO
jO4D8L21pTrfQS1T+HfyAAAAC2tpYXJhQG5peG9zAQI=
-----END OPENSSH PRIVATE KEY-----
EOF
chmod 600 /root/.ssh/id_ed25519
""")
    deployer.succeed("""
      set -xe
      cd /etc/ssl/certs
      { cat ca-bundle.crt
        cat ca-certificates.crt
        cat pve-root-ca.pem
      } > new-ca-bundle.crt
      rm ca-bundle.crt ca-certificates.crt
      mv new-ca-bundle.crt ca-bundle.crt
      ln -s ca-bundle.crt ca-certificates.crt
      openssl verify -CApath /etc/ssl/certs ./pve-root-ca.pem
    """)
    deploy = f"""
      ssh -o BatchMode=yes -o StrictHostKeyChecking=no pve "true"
      export PROXMOX_VE_INSECURE="true"
      export SSL_CERT_FILE=/tmp/pve-ca-bundle.crt
      export PROXMOX_VE_API_TOKEN="root@pam!mytoken={pm_token}"
      ${lib.getExe deployment.run} | jq -r '.ipv4.value[0]'
    """
    with subtest("Run the deployment"):
        ip = deployer.succeed(deploy).strip()
    with subtest("Verify package"):
        deployer.succeed(f"""
          ssh -i "/root/.ssh/id_ed25519" -o StrictHostKeyChecking=no -o BatchMode=yes -J pve root@{ip} su - operator -c hello >&2
        """)
    with subtest("No-op update"):
        deployer.succeed(deploy, timeout=120)
  '';
}

View file

@ -0,0 +1,62 @@
{
  config,
  system,
  sources ? import ../../../npins,
  ...
}@args:
let
  # This module's own repo-relative path; recorded in the deployment (as
  # `module`) together with the serializable `args` so the generated run
  # script can re-import this file later.
  self = "deployment/check/data-model-tf/data-model.nix";
  inherit (sources) nixpkgs;
  pkgs = import nixpkgs { inherit system; };
  inherit (pkgs) lib;
  inherit (pkgs.callPackage ../common/utils.nix { }) mkNixosConfiguration;
  # Deployment parameters supplied by the caller (see this check's
  # nixosTest.nix).
  inherit (config)
    nodeName
    pathToRoot
    targetSystem
    sshOpts
    httpBackend
    ;
in
# Evaluate a deployment model whose default environment deploys to a
# single existing host over SSH via Terraform (`tf-host`).
(pkgs.callPackage ../../utils.nix { }).evalModel (
  { config, ... }:
  {
    imports = [ ../common/model.nix ];
    config = {
      environments.default = environment: {
        resources."operator-environment".login-shell.username = "operator";
        implementation =
          {
            required-resources,
            deployment-name,
          }:
          {
            tf-host = {
              nixos-configuration = mkNixosConfiguration environment required-resources;
              system = targetSystem;
              ssh = {
                username = "root";
                host = nodeName;
                key-file = null;
                inherit sshOpts;
              };
              # self-reference: lets the run script re-import this module
              module = self;
              inherit args deployment-name httpBackend;
              root-path = pathToRoot;
            };
          };
      };
    };
    options.default =
      let
        env = config.environments.default;
      in
      lib.mkOption {
        type = env.resource-mapping.output-type;
        # The deployment of the model's example configuration; consumers
        # read `.default.tf-host` (e.g. its `run` script).
        default = env.deployment {
          deployment-name = "default";
          configuration = config."example-configuration";
        };
      };
  }
)

View file

@ -9,31 +9,25 @@ let
inherit (import ./constants.nix) pathToRoot;
nodeName = "target";
backendPort = builtins.toString 8080;
deployment-config = {
inherit nodeName pathToRoot;
targetSystem = system;
sshOpts = [ ];
httpBackend = rec {
TF_HTTP_USERNAME = "basic";
TF_HTTP_PASSWORD = "fake-secret";
TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/project1/example";
TF_HTTP_LOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_UNLOCK_ADDRESS = TF_HTTP_ADDRESS;
};
};
deploy =
(import ../common/data-model.nix {
(import ./data-model.nix {
inherit system;
config = deployment-config;
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."tf-deployment".tf-host.run;
config = {
inherit nodeName pathToRoot;
targetSystem = system;
sshOpts = [ ];
httpBackend = rec {
TF_HTTP_USERNAME = "basic";
TF_HTTP_PASSWORD = "fake-secret";
TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/project1/example";
TF_HTTP_LOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_UNLOCK_ADDRESS = TF_HTTP_ADDRESS;
};
};
}).default.tf-host.run;
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-single-host/run.sh

View file

@ -1,5 +1,4 @@
{
inputs,
lib,
hostPkgs,
config,
@ -151,17 +150,6 @@ in
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;

View file

@ -15,7 +15,7 @@ in
{
name = "proxmox-basic";
nodes.mypve =
nodes.pve =
{ sources, ... }:
{
imports = [
@ -44,41 +44,41 @@ in
};
testScript = ''
machine.start()
machine.wait_for_unit("pveproxy.service")
assert "running" in machine.succeed("pveproxy status")
pve.start()
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
# Copy Iso
machine.succeed("mkdir -p /var/lib/vz/template/iso/")
machine.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
pve.succeed("mkdir -p /var/lib/vz/template/iso/")
pve.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
# Declarative VM creation
machine.wait_for_unit("multi-user.target")
machine.succeed("qm stop 100 --timeout 0")
pve.wait_for_unit("multi-user.target")
pve.succeed("qm stop 100 --timeout 0")
# Seabios VM creation
machine.succeed(
pve.succeed(
"qm create 101 --kvm 0 --bios seabios -cdrom local:iso/minimal.iso",
"qm start 101",
"qm stop 101 --timeout 0"
)
# Legacy ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 102 --kvm 0 --bios ovmf -cdrom local:iso/minimal.iso",
"qm start 102",
"qm stop 102 --timeout 0"
)
# UEFI ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 103 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m -cdrom local:iso/minimal.iso",
"qm start 103",
"qm stop 103 --timeout 0"
)
# UEFI ovmf vm creation with secure boot
machine.succeed(
pve.succeed(
"qm create 104 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m,pre-enrolled-keys=1 -cdrom local:iso/minimal.iso",
"qm start 104",
"qm stop 104 --timeout 0"

View file

@ -2,17 +2,7 @@ let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption types;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit pkgs inputs;
};
modules = [
module
./data-model.nix
];
}).config;
inherit (pkgs.callPackage ./utils.nix { inherit inputs; }) evalModel;
inherit (inputs.nixops4.lib) mkDeployment;
in
{
@ -30,7 +20,7 @@ in
*/
expr =
let
fediversity = eval (
fediversity = evalModel (
{ config, ... }:
{
config = {

View file

@ -1,8 +1,9 @@
{
pkgs,
lib,
config,
inputs,
pkgs,
sources ? import ../npins,
...
}:
let
@ -18,16 +19,7 @@ let
str
submodule
;
toBash =
v:
lib.replaceStrings [ "\"" ] [ "\\\"" ] (
if lib.isPath v || builtins.isNull v then
toString v
else if lib.isString v then
v
else
lib.strings.toJSON v
);
inherit (pkgs.callPackage ./utils.nix { }) toBash;
withPackages = packages: {
makeWrapperArgs = [
"--prefix"
@ -45,6 +37,12 @@ let
deployment-name,
args,
}:
# having a `module` location and (serializable) `args`, we know
# enough to call it again to extract different info elsewhere later.
# we use this to make a deployment script using the desired nixos config,
# which would otherwise not be serializable, while nix also makes it hard to
# produce its derivation to pass thru without a `nix-instantiate` call,
# which in turn would need to be passed the (unserializable) nixos config.
builtins.toString (
pkgs.writers.writeText "configuration.nix" ''
import ${root-path}/deployment/nixos.nix {
@ -135,6 +133,7 @@ let
deployment-name = mkOption {
description = "The name of the deployment for which to obtain the NixOS configuration.";
type = types.str;
default = "default";
};
root-path = mkOption {
description = "The path to the root of the repository.";
@ -285,6 +284,229 @@ let
};
});
};
tf-proxmox-vm = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
type = submodule (tf-host: {
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
ssh = host-ssh;
# TODO: add proxmox info
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
description = "The arguments with which to call the module to obtain the NixOS configuration.";
type = types.attrs;
};
deployment-name = mkOption {
description = "The name of the deployment for which to obtain the NixOS configuration.";
type = types.str;
};
root-path = mkOption {
description = "The path to the root of the repository.";
type = types.path;
};
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
httpBackend = mkOption {
description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
type = types.attrsOf (types.either types.str types.int);
};
bridge = mkOption {
description = "The name of the network bridge (defaults to vmbr0).";
type = types.str;
default = "vmbr0";
};
vlanId = mkOption {
description = "The VLAN identifier.";
type = types.int;
default = 0;
};
imageDatastoreId = mkOption {
description = "ID of the datastore of the image.";
type = types.str;
default = "local";
};
vmDatastoreId = mkOption {
description = "ID of the datastore of the VM.";
type = types.str;
default = "local";
};
cdDatastoreId = mkOption {
description = "ID of the datastore of the virtual CD-rom drive to use for cloud-init.";
type = types.str;
default = "local";
};
ipv4Gateway = mkOption {
description = "Gateway for IPv4.";
type = types.str;
default = "";
};
ipv4Address = mkOption {
description = "IPv4 address.";
type = types.str;
default = "";
};
ipv6Gateway = mkOption {
description = "Gateway for IPv6.";
type = types.str;
default = "";
};
ipv6Address = mkOption {
description = "IPv6 address.";
type = types.str;
default = "";
};
run = mkOption {
type = types.package;
# error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
inherit (tf-host.config)
system
ssh
module
args
deployment-name
httpBackend
root-path
node-name
bridge
vlanId
imageDatastoreId
vmDatastoreId
cdDatastoreId
ipv4Gateway
ipv4Address
ipv6Gateway
ipv6Address
;
inherit (ssh)
host
username
key-file
sshOpts
;
deployment-type = "tf-proxmox-vm";
nixos_conf = writeConfig {
inherit
system
module
args
deployment-name
root-path
deployment-type
;
};
# machine = import nixos_conf;
machine = import ./nixos.nix {
inherit sources system;
configuration = tf-host.config.nixos-configuration;
# configuration = { ... }: {
# imports = [
# tf-host.config.nixos-configuration
# ../infra/common/nixos/repart.nix
# ];
# };
};
# inherit (machine.config.boot.uki) name;
name = "monkey";
# # systemd-repart
# better for cross-compilation, worse for pre-/post-processing, doesn't support MBR: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
# raw = "${machine.config.system.build.image}/${name}.raw";
# disko
# worse for cross-compilation, better for pre-/post-processing, needs manual `imageSize`, random failures: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
raw = "${machine.config.system.build.diskoImages}/main.raw";
# # nixos-generators: note it can straight-up do qcow2 as well, if we settle for nixos-generators
# # `mount: /run/nixos-etc-metadata.J3iARWBtna: failed to setup loop device for /nix/store/14ka2bmx6lcnyr8ah2yl787sqcgxz5ni-etc-metadata.erofs.`
# # [`Error: Failed to parse os-release`](https://github.com/NixOS/nixpkgs/blob/5b1861820a3bc4ef2f60b0afcffb71ea43f5d000/pkgs/by-name/sw/switch-to-configuration-ng/src/src/main.rs#L151)
# raw = let
# # TODO parameterize things to let this flow into the terraform
# # btw qcow can be made by nixos-generators (qcow, qcow-efi) or by `image.repart`
# # wait, so i generate an image for the nixos config from the data model? how would i then propagate that to deploy?
# gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system formatConfig;
# inherit (sources) nixpkgs;
# configuration = tf-host.config.nixos-configuration;
# };
# in
# "${gen.config.system.build.${formatAttr}}/nixos${fileExtension}";
environment = {
key_file = key-file;
ssh_opts = sshOpts;
inherit
host
nixos_conf
bridge
;
node_name = node-name;
ssh_user = username;
vlan_id = vlanId;
image_datastore_id = imageDatastoreId;
vm_datastore_id = vmDatastoreId;
cd_datastore_id = cdDatastoreId;
ipv4_gateway = ipv4Gateway;
ipv4_address = ipv4Address;
ipv6_gateway = ipv6Gateway;
ipv6_address = ipv6Address;
};
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend;
tfPackage = pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { };
tfDirs = [
"deployment/run/tf-single-host"
"deployment/run/tf-proxmox-vm"
];
};
vm_name = "test14";
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox.sh"
(withPackages [
pkgs.jq
pkgs.qemu
pkgs.nixos-generators
pkgs.httpie
(pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
])
''
set -e
# TODO after install: $nix_host_keys
# cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
# chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
# nixos-generate gives the burden of building revisions, while systemd-repart handles partitioning ~~at the burden of version revisions~~
# .qcow2 is around half the size of .raw, on top of supporting backups - be it apparently at the cost of performance
qemu-img convert -f raw -O qcow2 -C "${raw}" /tmp/${name}.qcow2
ls -l ${raw} >&2
ls -l /tmp/${name}.qcow2 >&2
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
TF_VAR_image=/tmp/${name}.qcow2 \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox-vm/run.sh
'';
# # don't really wanna deal with having to do versioned updates for now
# qemu-img convert -f raw -O qcow2 -C "${machine.config.system.build.image}/${name}.raw" /tmp/${name}.qcow2
};
};
});
};
};
in
{
@ -460,9 +682,7 @@ in
readOnly = true;
default = submodule {
options = {
enable = lib.mkEnableOption {
description = "your Fediversity configuration";
};
enable = lib.mkEnableOption "your Fediversity configuration";
applications = lib.mapAttrs (
_name: application:
mkOption {

View file

@ -40,6 +40,10 @@
deployment-model-tf = import ./check/data-model-tf {
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./check/data-model-tf-proxmox {
inherit inputs sources system;
};
};
};
}

View file

@ -1,9 +1,10 @@
{
configuration,
system,
sources ? import ../npins,
...
}:
let
sources = import ../npins;
eval = import "${sources.nixpkgs}/nixos/lib/eval-config.nix" {
inherit system;
specialArgs = {

View file

@ -1,23 +1,33 @@
#! /usr/bin/env bash
set -xeuo pipefail
declare username host key_file ssh_opts nixos_conf
IFS=" " read -r -a ssh_opts <<< "$( (echo "$ssh_opts" | jq -r '@sh') | tr -d \'\")"
readarray -t ssh_opts < <(echo "$ssh_opts" | jq -r '.[]')
# DEPLOY
sshOpts=(
sshOptsInit=(
-o BatchMode=yes
-o StrictHostKeyChecking=no
)
for ssh_opt in "${ssh_opts[@]}"; do
sshOpts+=(
-o "$ssh_opt"
)
done
if [[ -n "$key_file" ]]; then
sshOpts+=(
sshOptsInit+=(
-i "$key_file"
)
fi
# [@] will quote variables containing spaces itself
sshOptsAt=("${sshOptsInit[@]}")
for ssh_opt in "${ssh_opts[@]}"; do
sshOptsAt+=(
-o "${ssh_opt}"
)
done
# [*] needs manual quoting
sshOptsAsterisk=("${sshOptsInit[@]}")
for ssh_opt in "${ssh_opts[@]}"; do
sshOptsAsterisk+=(
-o "\"${ssh_opt}\""
)
done
destination="$username@$host"
command=(nix-instantiate --show-trace "${nixos_conf}")
@ -32,9 +42,9 @@ command=(nix-instantiate --show-trace "${nixos_conf}")
# FIXME explore import/readFile as ways to instantiate the derivation, potentially allowing to realize the store path up-front from Nix?
outPath=$(nix-store --realize "$("${command[@]}" -A config.system.build.toplevel.drvPath --eval --strict --json | jq -r '.')")
# deploy the config by nix-copy-closure
NIX_SSHOPTS="${sshOpts[*]}" nix-copy-closure --to "$destination" "$outPath" --gzip --use-substitutes
NIX_SSHOPTS="${sshOptsAsterisk[*]}" nix-copy-closure --to "$destination" "$outPath" --gzip --use-substitutes
# switch the remote host to the config
# shellcheck disable=SC2029
ssh "${sshOpts[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath"
ssh "${sshOptsAt[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath"
# shellcheck disable=SC2029
ssh -o "ConnectTimeout=1" -o "ServerAliveInterval=1" "${sshOpts[@]}" "$destination" "nohup $outPath/bin/switch-to-configuration switch &" 2>&1
ssh -o "ConnectTimeout=5" -o "ServerAliveInterval=1" "${sshOptsAt[@]}" "$destination" "nohup env NIXOS_INSTALL_BOOTLOADER=0 $outPath/bin/switch-to-configuration switch &" 2>&1

View file

@ -0,0 +1,31 @@
#! /usr/bin/env bash
# Poll a host over SSH until it accepts a connection, or give up.
#
# Required environment variables:
#   username  - SSH user to connect as
#   host      - target host name or address
#   key_file  - path to an SSH private key; may be empty to use defaults
#   ssh_opts  - JSON array of extra `-o` options, e.g. '["ProxyJump=pve"]'
#
# Exits 0 as soon as one probe succeeds, 1 after 30 failed attempts.
set -euo pipefail
declare username host key_file ssh_opts
# decode the JSON list of extra options into a bash array, one per line
readarray -t ssh_opts < <(echo "$ssh_opts" | jq -r '.[]')
sshOpts=(
  -o BatchMode=yes
  -o StrictHostKeyChecking=no
  -o ConnectTimeout=5
  -o ServerAliveInterval=5
)
if [[ -n "${key_file}" ]]; then
  sshOpts+=(
    -i "${key_file}"
  )
fi
for ssh_opt in "${ssh_opts[@]}"; do
  sshOpts+=(
    -o "${ssh_opt}"
  )
done
# up to 30 probes, 5s apart (plus the per-attempt 5s connect timeout)
for i in {1..30}; do
  if ssh "${sshOpts[@]}" "${username}@${host}" "true"; then
    exit 0
  fi
  echo "Waiting for SSH (attempt #$i)..." >&2
  sleep 5
done
echo "SSH never came up!" >&2
exit 1

View file

@ -0,0 +1,181 @@
# Provision a NixOS VM on a Proxmox VE node via the bpg/proxmox provider:
# upload a qcow2 disk image, create the VM from it, wait for SSH to come
# up, then push the NixOS configuration through the tf-single-host module.
# State lives in an HTTP backend configured via TF_HTTP_* env variables.
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "= 0.81.0"
    }
  }
  backend "http" {
  }
}
locals {
  dump_name = "qemu-nixos-fediversity-${var.category}.qcow2"
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
  endpoint = "https://${var.host}:8006/"
  # used only for files and creating custom disks
  ssh {
    agent = true
    # uncomment and configure if using api_token instead of password
    username = "root"
    # node {
    #   name = "${var.node_name}"
    #   address = "${var.host}"
    #   # port = 22
    # }
  }
}
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
# FIXME (un)stream
# FIXME handle known-hosts in TF state
# FIXME move to host
# FIXME switch to base image shared between jobs as upload seems a bottleneck? e.g. by:
# - recursive TF
# - hash in name over overwrite
# won't notice file changes: https://github.com/bpg/terraform-provider-proxmox/issues/677
resource "proxmox_virtual_environment_file" "upload" {
  # # https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts
  # timeouts {
  #   create = "60m"
  # }
  content_type = "import"
  # https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
  # PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
  # that UI action also adds it in `/etc/pve/storage.cfg`
  datastore_id   = var.image_datastore_id
  node_name      = var.node_name
  overwrite      = true
  timeout_upload = 500
  # timeout_upload = 1
  source_file {
    # path = "/tmp/proxmox-image/${local.dump_name}"
    path      = var.image
    file_name = local.dump_name
    # FIXME compute and pass hash (so identical builds don't trigger drift)
    # checksum = "sha256"
  }
}
resource "proxmox_virtual_environment_vm" "nix_vm" {
  lifecycle {
    # wait, would this not disseminate any changes to this property,
    # or just defer syncing when only this changed?
    ignore_changes = [
      disk["import_from"],
      initialization,
    ]
  }
  node_name   = var.node_name
  pool_id     = var.pool_id
  description = var.description
  started     = true
  # https://wiki.nixos.org/wiki/Virt-manager#Guest_Agent
  agent {
    enabled = true
    timeout = "2m"
    trim    = true
  }
  cpu {
    type    = "x86-64-v2-AES"
    cores   = var.cores
    sockets = var.sockets
    numa    = true
  }
  memory {
    dedicated = var.memory
  }
  # root disk, imported from the uploaded qcow2 image
  disk {
    datastore_id = var.vm_datastore_id
    file_format  = "qcow2"
    interface    = "scsi0"
    discard      = "on"
    iothread     = true
    size         = var.disk_size
    ssd          = true
    backup       = false
    cache        = "none"
    import_from  = proxmox_virtual_environment_file.upload.id
  }
  efi_disk {
    datastore_id = var.vm_datastore_id
    file_format  = "qcow2"
    type         = "4m"
  }
  network_device {
    model   = "virtio"
    bridge  = var.bridge
    vlan_id = var.vlan_id
  }
  operating_system {
    type = "l26"
  }
  scsi_hardware = "virtio-scsi-single"
  bios          = "ovmf"
  # cloud-init drive carrying the static IP configuration
  initialization {
    datastore_id = var.cd_datastore_id
    interface    = "sata2"
    ip_config {
      ipv4 {
        gateway = var.ipv4_gateway
        address = var.ipv4_address
      }
      ipv6 {
        gateway = var.ipv6_gateway
        address = var.ipv6_address
      }
    }
  }
}
# Block until the new VM answers on SSH before attempting nixos-rebuild.
# NOTE(review): var.ssh_opts is spliced into the shell command inside
# single quotes; a value containing a single quote would break the command
# — confirm callers only pass plain JSON without embedded quotes.
resource "null_resource" "await_ssh" {
  depends_on = [
    proxmox_virtual_environment_vm.nix_vm
  ]
  provisioner "local-exec" {
    command = "env username='root' host='${proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1][0]}' key_file=${var.key_file} ssh_opts='${var.ssh_opts}' bash ./await-ssh.sh"
  }
}
# Push the NixOS configuration to the freshly provisioned VM.
# [1][0]: first address of the second reported interface — presumably the
# first non-loopback NIC; TODO confirm against the provider's ordering.
module "nixos-rebuild" {
  depends_on = [
    data.external.hash,
    null_resource.await_ssh,
  ]
  source     = "../tf-single-host"
  nixos_conf = var.nixos_conf
  username   = "root"
  host       = proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1][0]
  key_file   = var.key_file
  ssh_opts   = var.ssh_opts
}
output "id" {
  value = proxmox_virtual_environment_vm.nix_vm.vm_id
}
output "ipv4" {
  value = proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1]
}
# IPv6 addresses are link-local-qualified with their interface name.
output "ipv6" {
  value = [ for elem in proxmox_virtual_environment_vm.nix_vm.ipv6_addresses[1] : "${elem}%${proxmox_virtual_environment_vm.nix_vm.network_interface_names[1]}" ]
}

View file

@ -0,0 +1,8 @@
#! /usr/bin/env bash
# Apply the tf-proxmox-vm OpenTofu configuration and emit its outputs.
#
# Required environment variable:
#   tf_env - path to a pre-built Terraform environment (see run/tf-env.nix)
#
# Only the JSON outputs go to stdout; apply logs are sent to stderr so
# callers can pipe the result straight into jq.
set -euo pipefail
declare tf_env
cd "${tf_env}/deployment/run/tf-proxmox-vm"
# parallelism=1: limit OOM risk
TF_LOG=info tofu apply --auto-approve -input=false -parallelism=1 >&2
TF_LOG=error tofu output -json

View file

@ -0,0 +1,49 @@
# FIXME: use overlays so this gets imported just once?
{
pkgs,
}:
# FIXME centralize overlays
# XXX using recent revision for https://github.com/NixOS/nixpkgs/pull/447849
let
sources = import ../../../npins;
mkProvider =
args:
pkgs.terraform-providers.mkProvider (
{ mkProviderFetcher = { repo, ... }: sources.${repo}; } // args
);
in
(
(pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/op/opentofu/package.nix" { })
.overrideAttrs
(old: rec {
patches = (old.patches or [ ]) ++ [
# TF with back-end poses a problem for nix: initialization involves both
# mutation (nix: only inside build) and a network call (nix: not inside build)
../../check/data-model-tf/02-opentofu-sandboxed-init.patch
];
# versions > 1.9.0 need go 1.24+
version = "1.9.0";
src = pkgs.fetchFromGitHub {
owner = "opentofu";
repo = "opentofu";
tag = "v${version}";
hash = "sha256-e0ZzbQdex0DD7Bj9WpcVI5roh0cMbJuNr5nsSVaOSu4=";
};
vendorHash = "sha256-fMTbLSeW+pw6GK8/JLZzG2ER90ss2g1FSDX5+f292do=";
})
).withPlugins
(p: [
p.external
p.null
(mkProvider {
owner = "bpg";
repo = "terraform-provider-proxmox";
# 0.82+ need go 1.25
rev = "v0.81.0";
spdx = "MPL-2.0";
hash = null;
vendorHash = "sha256-cpei22LkKqohlE76CQcIL5d7p+BjNcD6UQ8dl0WXUOc=";
homepage = "https://registry.terraform.io/providers/bpg/proxmox";
provider-source-address = "registry.opentofu.org/bpg/proxmox";
})
])

View file

@ -0,0 +1,133 @@
# Input variables for the tf-proxmox-vm module (see main.tf).
variable "nixos_conf" {
  description = "The path to the NixOS configuration to deploy."
  type        = string
}
variable "ssh_user" {
  description = "The SSH user to use."
  type        = string
  default     = "root"
}
variable "host" {
  description = "The host of the Proxmox Virtual Environment."
  type        = string
}
variable "node_name" {
  description = "The name of the Proxmox node to use."
  type        = string
}
variable "key_file" {
  description = "Path to the user's SSH private key."
  type        = string
}
# NOTE: passed as a JSON-encoded list in a string (decoded by await-ssh.sh),
# not as a native list(string).
variable "ssh_opts" {
  description = "Extra SSH options (`-o`) to use, as a JSON array string."
  type        = string
  default     = "[]"
}
variable "image" {
  description = "Path to the qcow2 disk image to upload and import as the VM's root disk."
  type        = string
}
variable "bridge" {
  description = "The name of the network bridge (defaults to vmbr0)."
  type        = string
  default     = "vmbr0"
}
variable "vlan_id" {
  description = "The VLAN identifier."
  type        = number
  default     = 0
}
variable "image_datastore_id" {
  description = "ID of the datastore of the image."
  type        = string
  default     = "local"
}
variable "vm_datastore_id" {
  description = "ID of the datastore of the VM."
  type        = string
  default     = "local"
}
variable "cd_datastore_id" {
  description = "ID of the datastore of the virtual CD-rom drive to use for cloud-init."
  type        = string
  default     = "local"
}
variable "ipv4_gateway" {
  description = "Gateway for IPv4."
  type        = string
  default     = ""
}
variable "ipv4_address" {
  description = "IPv4 address."
  type        = string
  default     = ""
}
variable "ipv6_gateway" {
  description = "Gateway for IPv6."
  type        = string
  default     = ""
}
variable "ipv6_address" {
  description = "IPv6 address."
  type        = string
  default     = ""
}
#########################################
variable "category" {
  type        = string
  description = "Category to be used in naming the base image."
  default     = "test"
}
variable "description" {
  type        = string
  description = "Free-form description attached to the VM."
  default     = ""
}
variable "sockets" {
  type        = number
  description = "The number of sockets of the VM."
  default     = 1
}
variable "cores" {
  type        = number
  description = "The number of cores of the VM."
  default     = 1
}
variable "memory" {
  type        = number
  description = "The amount of memory of the VM in MiB."
  default     = 2048
}
variable "disk_size" {
  type        = number
  description = "The amount of disk of the VM in GiB."
  default     = 32
}
variable "pool_id" {
  type        = string
  description = "The identifier for a pool to assign the virtual machine to."
  default     = "Fediversity"
}

View file

@ -5,17 +5,7 @@
httpBackend,
}:
let
# FIXME factor out
toBash =
v:
lib.replaceStrings [ "\"" ] [ "\\\"" ] (
if lib.isPath v || builtins.isNull v then
toString v
else if lib.isString v then
v
else
lib.strings.toJSON v
);
inherit (pkgs.callPackage ../utils.nix { }) toBash;
in
pkgs.writeScriptBin "setup" ''
set -e

View file

@ -1,5 +1,5 @@
#! /usr/bin/env bash
set -xeuo pipefail
set -euo pipefail
declare tf_env
export TF_LOG=info

29
deployment/utils.nix Normal file
View file

@ -0,0 +1,29 @@
{
  pkgs,
  lib,
  inputs ? null,
  ...
}:
{
  # Evaluate a module together with the deployment data model and return
  # the resulting configuration.
  evalModel =
    module:
    let
      evaluated = lib.evalModules {
        specialArgs = {
          inherit pkgs inputs;
        };
        modules = [
          ./data-model.nix
          module
        ];
      };
    in
    evaluated.config;
  # Render a Nix value as a string that is safe to splice into a
  # double-quoted bash word: strings pass through as-is, paths and null are
  # stringified, anything else is serialized to JSON; embedded double
  # quotes are backslash-escaped afterwards.
  toBash =
    v:
    let
      rendered =
        if lib.isString v then
          v
        else if lib.isPath v || v == null then
          toString v
        else
          lib.strings.toJSON v;
    in
    lib.replaceStrings [ "\"" ] [ "\\\"" ] rendered;
}

View file

@ -52,6 +52,73 @@
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
# https://192.168.51.81:8006/#v1:0:=node%2Fnode051:4:::::8::=apitokens
# apps.default = {
# type = "app";
# program = pkgs.writers.writeBashBin "provision-proxmox.sh"
# {
# makeWrapperArgs = [
# "--prefix"
# "PATH"
# ":"
# "${lib.makeBinPath [
# pkgs.jq
# pkgs.httpie
# ]}"
# ];
# }
# ''
# sh ./infra/proxmox-remove.sh --api-url "https://192.168.51.81:8006/api2/json" --username "kiara@ProcoliX" --password "" 7014 # test14
# sh ./infra/proxmox-provision.sh --api-url "https://192.168.51.81:8006/api2/json" --username "kiara@ProcoliX" --password "" test14
# '';
# };
# api_token = "terraform@pve!provider=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# kiara@ProcoliX!dsdfsfdsfd=30df234b-02f3-4ed9-b778-00d28ad3499c
apps.default =
let
inherit (pkgs) system;
# FIXME rewire to defer env var
deployment = pkgs.writeShellScriptBin "my-app-with-environment" ''
export PROXMOX_VE_SSH_USERNAME="kiara@ProcoliX"
export PROXMOX_VE_SSH_PASSWORD=""
${(import ./deployment/check/common/data-model.nix {
inherit system;
config = {
targetSystem = system;
nodeName = "192.168.51.81"; # root@fediversity-proxmox
pathToRoot = builtins.path {
path = ./.;
name = "root";
};
sshOpts = [ ];
key-file = "";
node-name = "node051";
bridge = "ovsbr0";
vlanId = 1305;
imageDatastoreId = "local";
vmDatastoreId = "linstor_storage";
cdDatastoreId = "local-lvm";
ipv4Gateway = "eth0";
ipv4Address = "";
# ipv4Address = "95.215.187.${vm-id}";
ipv6Gateway = "eth0";
ipv6Address = "";
# ipv6Address = "2a00:51c0:13:1305::${vm-id}";
};
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."tf-proxmox-deployment".tf-proxmox-vm
}
'';
in
{
type = "app";
program = deployment.run;
};
};
}
);

View file

@ -10,6 +10,7 @@ in
imports = [
./networking.nix
./users.nix
./repart.nix
];
time.timeZone = "Europe/Amsterdam";

View file

@ -0,0 +1,192 @@
# NixOS module: build a systemd-repart disk image (ESP + root) booted via a
# Unified Kernel Image through systemd-boot. Commented-out sections are kept
# as a record of experiments (tmpfs root, split /var, immutable /nix/store).
# See https://nixos.org/manual/nixos/stable/#sec-image-repart
{
  config,
  pkgs,
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    "${modulesPath}/image/repart.nix"
  ];
  # Mount the repart-built partitions by the fixed partition UUIDs declared
  # below, so the runtime fstab matches the generated image exactly.
  fileSystems = {
    # "/" = {
    #   fsType = "tmpfs";
    #   options = [
    #     "size=20%"
    #   ];
    # };
    "/" =
      let
        partConf = config.image.repart.partitions."root".repartConfig;
      in
      {
        device = "/dev/disk/by-partuuid/${partConf.UUID}";
        fsType = partConf.Format;
      };
    # "/var" =
    #   let
    #     partConf = config.image.repart.partitions."var".repartConfig;
    #   in
    #   {
    #     device = "/dev/disk/by-partuuid/${partConf.UUID}";
    #     fsType = partConf.Format;
    #   };
    "/boot" =
      let
        partConf = config.image.repart.partitions."esp".repartConfig;
      in
      {
        device = "/dev/disk/by-partuuid/${partConf.UUID}";
        fsType = partConf.Format;
      };
    # "/nix/store" =
    #   let
    #     partConf = config.image.repart.partitions."store".repartConfig;
    #   in
    #   {
    #     device = "/dev/disk/by-partlabel/${partConf.Label}";
    #     fsType = partConf.Format;
    #   };
  };
  # Name of the UKI file; also reused as the image name below.
  boot.uki.name = "monkey";
  # fileSystems."/".device = "/dev/disk/by-label/nixos";
  # https://nixos.org/manual/nixos/stable/#sec-image-repart
  # https://x86.lol/generic/2024/08/28/systemd-sysupdate.html
  image.repart =
    let
      efiArch = pkgs.stdenv.hostPlatform.efiArch;
    in
    {
      name = config.boot.uki.name;
      # name = "image";
      # split = true;
      partitions = {
        "esp" = {
          # The contents to end up in the filesystem image.
          contents = {
            # "/EFI/BOOT/BOOTX64.EFI".source = "${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
            # systemd-boot binary named for the host's EFI architecture.
            "/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
              "${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
            # The Unified Kernel Image produced by the NixOS build.
            "/EFI/Linux/${config.system.boot.loader.ukiFile}".source =
              "${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
            # https://man.archlinux.org/man/loader.conf.5
            # NOTE(review): this file lives under /loader/entries/, but its
            # keys (timeout/editor/default) are loader.conf options, not boot
            # entry keys; `logLevel=debug` is not a documented loader.conf
            # option either — confirm against loader.conf(5).
            "/loader/entries/loader.conf".source = pkgs.writeText "loader.conf" ''
              timeout 0
              editor yes
              default *
              logLevel=debug
            '';
            # "/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
            #   timeout 0
            #   editor yes
            #   default *
            #   logLevel=debug
            # '';
            # nixos-*.conf
            # "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
            #   title NixOS
            #   linux /EFI/nixos/kernel.efi
            #   initrd /EFI/nixos/initrd.efi
            #   options init=/nix/store/.../init root=LABEL=nixos
            # '';
            # systemd-boot configuration
            # NOTE(review): "$out" here is a literal derivation name, not a
            # bash variable — presumably unintended; harmless but confusing.
            "/loader/loader.conf".source = (
              pkgs.writeText "$out" ''
                timeout 3
              ''
            );
          };
          # https://www.man7.org/linux//man-pages/man5/repart.d.5.html
          repartConfig = {
            Priority = 1;
            Type = "esp";
            MountPoint = "/boot";
            Format = "vfat";
            # NOTE(review): placeholder UUID — fine for a single image, but
            # colliding UUIDs across images/disks would confuse by-partuuid
            # mounts; confirm this is intentional.
            UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa";
            SizeMinBytes = "500M";
            SizeMaxBytes = "500M";
          };
          # repartConfig = {
          #   Type = "esp";
          #   UUID = "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"; # Well known
          #   Format = "vfat";
          #   SizeMinBytes = "256M";
          #   SplitName = "-";
          # };
        };
        "root" = {
          # Copy the full system closure into the root partition.
          storePaths = [ config.system.build.toplevel ];
          repartConfig = {
            Priority = 2;
            Type = "root";
            Label = "nixos";
            MountPoint = "/";
            Format = "ext4";
            # NOTE(review): placeholder UUID — see note on the ESP above.
            UUID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb";
            # populates the fs twice
            Minimize = "guess";
            # Minimize = "off";
            # SizeMinBytes = "1G";
            # SizeMaxBytes = "20G";
          };
          # "store" = {
          #   storePaths = [ config.system.build.toplevel ];
          #   stripNixStorePrefix = true;
          #   repartConfig = {
          #     Type = "linux-generic";
          #     Label = "store_${config.system.image.version}";
          #     Format = "squashfs";
          #     Minimize = "off";
          #     ReadOnly = "yes";
          #     SizeMinBytes = "1G";
          #     SizeMaxBytes = "1G";
          #     SplitName = "store";
          #   };
          # };
          # # Placeholder for the second installed Nix store.
          # "store-empty" = {
          #   repartConfig = {
          #     Type = "linux-generic";
          #     Label = "_empty";
          #     Minimize = "off";
          #     SizeMinBytes = "1G";
          #     SizeMaxBytes = "1G";
          #     SplitName = "-";
          #   };
          # };
          # # Persistent storage
          # "var" = {
          #   repartConfig = {
          #     Type = "var";
          #     UUID = "4d21b016-b534-45c2-a9fb-5c16e091fd2d"; # Well known
          #     Format = "xfs";
          #     Label = "nixos-persistent";
          #     Minimize = "off";
          #     # Has to be large enough to hold update files.
          #     SizeMinBytes = "2G";
          #     SizeMaxBytes = "2G";
          #     SplitName = "-";
          #     # Wiping this gives us a clean state.
          #     FactoryReset = "yes";
          #   };
          # };
        };
      };
    };
}

View file

@ -1,4 +1,4 @@
{ ... }:
{ lib, ... }:
{
_class = "nixos";
@ -11,6 +11,14 @@
# ];
boot = {
loader = {
systemd-boot.enable = true;
efi = {
canTouchEfiVariables = true;
efiSysMountPoint = "/boot";
};
grub.enable = false;
};
initrd = {
availableKernelModules = [
"ata_piix"
@ -22,24 +30,35 @@
};
};
fileSystems."/boot" = {
fsType = "vfat";
device = lib.mkDefault "/dev/sda1";
options = [
"fmask=0022"
"dmask=0022"
];
};
disko.devices.disk.main = {
device = "/dev/sda";
type = "disk";
imageSize = "20G"; # needed for image generation
content = {
type = "gpt";
partitions = {
MBR = {
priority = 0;
size = "1M";
type = "EF02";
};
# mbr = {
# priority = 0;
# size = "1M";
# type = "EF02";
# };
ESP = {
esp = {
priority = 1;
size = "500M";
type = "EF00";
label = "boot";
content = {
type = "filesystem";
format = "vfat";

View file

@ -7,7 +7,7 @@ set -euC
## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051
node=node051
readonly tmpdir=/tmp/proxmox-provision-$RANDOM
mkdir $tmpdir
@ -69,6 +69,7 @@ while [ $# -gt 0 ]; do
--api-url|--api_url) readonly api_url="$1"; shift ;;
--username) readonly username="$1"; shift ;;
--password) readonly password="$1"; shift ;;
--node) readonly node="$1"; shift ;;
--debug) debug=true ;;
@ -172,11 +173,24 @@ grab_vm_options () {
printf 'Grabing VM options for VM %s...\n' "$vm_name"
options=$(
nix --extra-experimental-features 'nix-command flakes' eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions.$vm_name
" \
--log-format raw --quiet
# nix --extra-experimental-features 'nix-command flakes' eval \
# --impure --raw --expr "
# builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions.$vm_name
# " \
# --log-format raw --quiet
echo '
{
"description":"",
"sockets":1,
"cores":1,
"memory":2048,
"diskSize":32,
"name":"test14",
"vmId":7014,
"hostPublicKey":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHTbxDzq3xFeLvrXs6tyTE08o3CekYZmqFeGmkcHmf21",
"unsafeHostPrivateKey":"-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW\nQyNTUxOQAAACB028Q86t8RXi7617OrckxNPKNwnpGGZqhXhppHB5n9tQAAAIhfhYlCX4WJ\nQgAAAAtzc2gtZWQyNTUxOQAAACB028Q86t8RXi7617OrckxNPKNwnpGGZqhXhppHB5n9tQ\nAAAEAualLRodpovSzGAhza2OVvg5Yp8xv3A7xUNNbKsMTKSHTbxDzq3xFeLvrXs6tyTE08\no3CekYZmqFeGmkcHmf21AAAAAAECAwQF\n-----END OPENSSH PRIVATE KEY-----\n"
}
'
)
vm_id=$(echo "$options" | jq -r .vmId)
@ -220,18 +234,45 @@ build_iso () {
nix_host_keys=
fi
nix --extra-experimental-features 'nix-command flakes' build \
# nix --extra-experimental-features 'nix-command flakes' build \
# --impure --expr "
# let flake = builtins.getFlake (builtins.toString ./.); in
# import ./infra/makeInstallerIso.nix {
# nixosConfiguration = flake.nixosConfigurations.$vm_name;
# # FIXME pass nixpkgs from npins
# $nix_host_keys
# }
# " \
# --log-format raw --quiet \
# --out-link "$tmpdir/installer-$vm_name"
# nix --extra-experimental-features 'nix-command' build \
# --impure --expr "
# import ./infra/makeInstallerIso.nix {
# # nixosConfiguration = $configuration;
# nixosConfiguration = import $configuration;
# $nix_host_keys
# }
# " \
# --log-format raw --quiet \
# --out-link "$tmpdir/installer-$vm_name"
# TODO after install: $nix_host_keys
# cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
# chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
nix --extra-experimental-features 'nix-command' build \
--impure --expr "
let flake = builtins.getFlake (builtins.toString ./.); in
import ./infra/makeInstallerIso.nix {
nixosConfiguration = flake.nixosConfigurations.$vm_name;
# FIXME pass nixpkgs from npins
$nix_host_keys
}
(import $configuration).config.system.build.image
" \
--log-format raw --quiet \
--out-link "$tmpdir/installer-$vm_name"
# ls "$tmpdir/installer-$vm_name"
# ls "$tmpdir/installer-$vm_name/image.raw"
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
die 'Something went wrong when building ISO for VM %s.
@ -239,7 +280,8 @@ Check the Nix logs and fix things. Possibly there just is no NixOS configuration
"$vm_name"
fi
ln -sf "$(ls "$tmpdir/installer-$vm_name"/iso/nixos-*.iso)" "$tmpdir/installer-$vm_name.iso"
# ln -sf "$(ls "$tmpdir/installer-$vm_name"/iso/nixos-*.iso)" "$tmpdir/installer-$vm_name.iso"
ln -sf "$(ls "$tmpdir/installer-$vm_name"/image.raw)" "$tmpdir/installer-$vm_name.raw"
printf 'done building ISO for VM %s.\n' "$vm_name"
release_lock build
@ -253,8 +295,8 @@ upload_iso () {
printf 'Uploading ISO for VM %s...\n' "$vm_name"
proxmox_sync POST "$api_url/nodes/$node/storage/local/upload" \
"filename@$tmpdir/installer-$vm_name.iso" \
content==iso
"filename@$tmpdir/installer-$vm_name.raw" \
content==raw
printf 'done uploading ISO for VM %s.\n' "$vm_name"
release_lock upload
@ -266,7 +308,7 @@ upload_iso () {
remove_iso () {
printf 'Removing ISO for VM %s...\n' "$vm_name"
proxmox_sync DELETE "$api_url/nodes/$node/storage/local/content/local:iso/installer-$vm_name.iso"
proxmox_sync DELETE "$api_url/nodes/$node/storage/local/content/local:iso/installer-$vm_name.raw"
printf 'done removing ISO for VM %s.\n' "$vm_name"
}
@ -284,7 +326,7 @@ create_vm () {
pool==Fediversity \
description=="$description" \
\
ide2=="local:iso/installer-$vm_name.iso,media=cdrom" \
ide2=="local:iso/installer-$vm_name.raw,media=cdrom" \
ostype==l26 \
\
bios==ovmf \
@ -360,8 +402,13 @@ provision_vm () (
remove_iso
)
for vm_name in $vm_names; do
provision_vm "$vm_name" &
# FIXME make vm_names a thing from $vm_name to $configuration?
# for vm_name in $vm_names; do
# provision_vm "$vm_name" &
# done
for chunk in $vm_names; do
IFS=: read -r vm_name configuration <<< "$chunk"
provision_vm "$vm_name" "$configuration" &
done
nb_errors=0

View file

@ -7,7 +7,7 @@ set -euC
## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051
node=node051
readonly tmpdir=/tmp/proxmox-remove-$RANDOM
mkdir $tmpdir
@ -59,6 +59,7 @@ while [ $# -gt 0 ]; do
--api-url|--api_url) readonly api_url="$1"; shift ;;
--username) readonly username=$1; shift ;;
--password) readonly password=$1; shift ;;
--node) readonly node="$1"; shift ;;
-h|-\?|--help) help; exit 0 ;;

View file

@ -202,9 +202,22 @@
},
"branch": "main",
"submodules": false,
"revision": "48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223.tar.gz",
"hash": "0606qcs8x1jwckd1ivf52rqdmi3lkn66iiqh6ghd4kqx0g2bw3nv"
"revision": "ce8768f43b4374287cd8b88d8fa9c0061e749d9a",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/ce8768f43b4374287cd8b88d8fa9c0061e749d9a.tar.gz",
"hash": "116zplxh64wxbq81wsfkmmssjs1l228kvhxfi9d434xd54k6vr35"
},
"terraform-provider-proxmox": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "bpg",
"repo": "terraform-provider-proxmox"
},
"branch": "main",
"submodules": false,
"revision": "891066821bf7993a5006b12a44c5b36dbdb852d8",
"url": "https://github.com/bpg/terraform-provider-proxmox/archive/891066821bf7993a5006b12a44c5b36dbdb852d8.tar.gz",
"hash": "0nh1b1mgkycjib2hfzgmq142kgklnnhk4rci4339pfgqfi1z841a"
}
},
"version": 5