Compare commits

...

13 commits

Author SHA1 Message Date
257788d5b7
factor out TF setup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
7695474b26
first time to get nix run pipeline to succeed including update step
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
6818c3155b
hardcoded networking setup for nix run vm 101
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
df7fb63df9
mv out acme logic
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
c0d327e969
start documenting needed proxmox user privileges
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
7b37e9fe0b
back to qcow over size difference
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
7996243a85
some cleanup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
0a7038f149
automated deployment to proxmox (in nix run) of vm booting to login
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
f0d90263d4
simplify
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
1eca94c055
bootable vm by repart
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
f9ed184068
get nix run to build a vm bootable by qemu
```
cp /nix/store/.../nixos.img disk.raw
chmod 0644 disk.raw
qemu-system-x86_64 -enable-kvm -m 2048 \
  -drive if=virtio,file=./disk.raw,format=raw \
  -bios "$(nix eval --impure --expr '(import <nixpkgs> { }).OVMF.fd.outPath' | jq -r)/FV/OVMF.fd"
```

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
b8bc0c55ee
WIP: proxmox deployment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>

continued

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-02 18:35:49 +02:00
a12a1606ed move timeout expectation to tests (#534)
Reviewed-on: fediversity/fediversity#534
2025-09-30 12:00:27 +02:00
34 changed files with 1821 additions and 117 deletions

View file

@@ -12,7 +12,7 @@ on:
jobs:
_checks:
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-model-tf-proxmox","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
runs-on: native
steps:
- run: true
@@ -53,6 +53,12 @@ jobs:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf -vL
deployment-model-tf-proxmox:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf-proxmox -L
deployment-panel:
runs-on: native
steps:

View file

@@ -21,5 +21,22 @@ in
default = [ ];
example = "ConnectTimeout=60";
};
proxmox-user = mkOption {
description = "The ProxmoX user to use.";
type = types.str;
default = "root@pam";
};
proxmox-password = mkOption {
description = "The ProxmoX password to use.";
type = types.str;
};
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
vm-names = mkOption {
description = "The names of VMs to provision.";
type = types.listOf types.str;
};
};
}
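
For orientation, a minimal sketch of how these new options might be set (hypothetical values, mirroring the test constants further down):

```
{
  proxmox-user = "root@pam";
  proxmox-password = "mytestpw"; # test-only secret, as in the checks below
  node-name = "pve";
  vm-names = [ "test14" ];
}
```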

View file

@@ -5,6 +5,7 @@
sources ? import ../../../npins,
...
}@args:
# FIXME allow default values for `config` module parameters?
let
# having this module's location (`self`) and (serializable) `args`, we know
@@ -23,13 +24,17 @@ let
pathToRoot
targetSystem
sshOpts
proxmox-user
proxmox-password
node-name
vm-names
;
inherit (lib) mkOption types;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit pkgs inputs;
inherit pkgs inputs sources;
};
modules = [
module
@@ -90,6 +95,7 @@ let
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
password = "password";
};
};
};
@@ -106,7 +112,10 @@ let
};
implementation = cfg: {
resources = lib.optionalAttrs cfg.enable {
hello.login-shell.packages.hello = pkgs.hello;
hello.login-shell = {
wheel = true;
packages.hello = pkgs.hello;
};
};
};
};
@@ -119,17 +128,114 @@ let
imports = [
./data-model-options.nix
../common/sharedOptions.nix
# tests need this; however, outside tests this (and especially its import of nixos-test-base) must not be used
../common/targetNode.nix
"${nixpkgs}/nixos/modules/profiles/minimal.nix"
# "${nixpkgs}/nixos/modules/profiles/perlless.nix" # failed under disko
"${nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# systemd-repart
# ../../../infra/common/nixos/repart.nix
# disko
"${sources.disko}/module.nix"
../../../infra/common/proxmox-qemu-vm.nix
];
users.users = environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
# # non-disko
# boot.loader.grub.enable = false;
# boot.loader.systemd-boot.enable = true;
# boot.loader.efi.efiSysMountPoint = "/boot";
# boot.loader.systemd-boot.edk2-uefi-shell.enable = true;
# boot.loader.efi.canTouchEfiVariables = true;
# # proxmox.qemuConf.bios == "ovmf";
# boot.growPartition = true;
# boot.loader.timeout = 1;
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "25.05";
services.qemuGuest.enable = true;
systemd.services.qemu-guest-agent = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
};
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = {
firewall.enable = false;
usePredictableInterfaceNames = false;
interfaces.eth0.ipv4.addresses = [
{
address = "95.215.187.101";
prefixLength = 24;
}
];
interfaces.eth0.ipv6.addresses = [
{
address = "2a00:51c0:13:1305::101";
prefixLength = 64;
}
];
defaultGateway = {
address = "95.215.187.1";
interface = "eth0";
};
defaultGateway6 = {
address = "2a00:51c0:13:1305::1";
interface = "eth0";
};
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
};
security.sudo.wheelNeedsPassword = false;
nix.settings.trusted-users = [ "@wheel" ];
users.mutableUsers = false;
users.users =
{
root = {
# password = "password"; # cannot log in
hashedPassword = "$y$j9T$QoArNaV2VrjPhQ6BMG1AA.$uq8jw0.g.dJwIfepqipxzeUD1ochgUs8A5QmVe4qbJ6"; # cannot log in
# hashedPasswordFile = pkgs.writeText "root-password" "$y$j9T$QoArNaV2VrjPhQ6BMG1AA.$uq8jw0.g.dJwIfepqipxzeUD1ochgUs8A5QmVe4qbJ6"; # type not null/string
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
# can log in
kiara = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
# cannot log in
operator = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
}
// environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
in
{
@@ -206,6 +312,70 @@ let
};
};
};
single-nixos-vm-bash-proxmox = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
deployment-name,
}:
{
bash-proxmox-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
# ssh = {
# username = "root";
# host = nodeName;
# key-file = null;
# inherit sshOpts;
# };
module = self;
inherit
args
deployment-name
# proxmox-host
proxmox-user
proxmox-password
node-name
vm-names
;
proxmox-host = nodeName;
root-path = pathToRoot;
};
};
};
single-nixos-vm-tf-proxmox = environment: {
resources."operator-environment".login-shell = {
wheel = true;
username = "operator";
};
implementation =
{
required-resources,
deployment-name,
}:
{
tf-proxmox-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
username = "root";
host = nodeName;
key-file = null;
inherit sshOpts;
};
module = self;
inherit
args
deployment-name
proxmox-user
proxmox-password
node-name
;
root-path = pathToRoot;
};
};
};
};
};
options = {
@@ -249,6 +419,28 @@ let
configuration = config."example-configuration";
};
};
"bash-proxmox-deployment" =
let
env = config.environments."single-nixos-vm-bash-proxmox";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "bash-proxmox-deployment";
configuration = config."example-configuration";
};
};
"tf-proxmox-deployment" =
let
env = config.environments."single-nixos-vm-tf-proxmox";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "tf-proxmox-deployment";
configuration = config."example-configuration";
};
};
};
}
);
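
As exercised by the checks further down, these deployment options are consumed roughly like so (a sketch mirroring `nixosTest.nix` of the tf-proxmox check; `deployment-config` stands for an attrset of the options shown above):

```
let
  deployment =
    (import ../common/data-model.nix {
      system = "x86_64-linux";
      config = deployment-config; # pathToRoot, nodeName, proxmox-user, ...
    })."tf-proxmox-deployment".tf-proxmox-host;
in
deployment.run # the deploy script derivation
```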

View file

@@ -41,7 +41,7 @@ in
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4 * 1024;
diskSize = 4 * 1024;
diskSize = 32 * 1024;
cores = 2;
};
@@ -62,6 +62,7 @@ in
sources.nixpkgs
sources.flake-inputs
sources.git-hooks
sources.disko
pkgs.stdenv
pkgs.stdenvNoCC
@@ -75,6 +76,7 @@ in
machine =
(pkgs.nixos [
./targetNode.nix
../../../infra/common/nixos/repart.nix
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";

View file

@@ -82,39 +82,39 @@ in
nodes =
{
deployer = {
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
deployer = lib.mkMerge [
{
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
}
(lib.mkIf config.enableAcme {
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
})
];
acme = lib.mkIf config.enableAcme {
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
## break things once we play with DNS?
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
## We aren't testing ACME - we just want certificates.
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
};
}
//
(
if config.enableAcme then
{
acme = {
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
## break things once we play with DNS?
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
## We aren't testing ACME - we just want certificates.
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
};
}
else
{ }
)
//
genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
// genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
testScript = ''
${forConcat (attrNames config.nodes) (n: ''

View file

@@ -16,7 +16,8 @@ in
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
# FIXME uncomment this when using test over `nix run`
# (modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
@@ -42,8 +43,8 @@ in
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
# Test VMs don't have a bootloader by default.
# boot.loader.grub.enable = false;
}
(mkIf config.enableAcme {

View file

@@ -0,0 +1,11 @@
{
targetMachines = [
"pve"
];
pathToRoot = builtins.path {
path = ../../..;
name = "root";
};
pathFromRoot = "/deployment/check/data-model-bash-proxmox";
enableAcme = true;
}

View file

@@ -0,0 +1,47 @@
{
inputs,
sources,
system,
}:
let
pkgs = import sources.nixpkgs-stable {
inherit system;
overlays = [ overlay ];
};
overlay = _: _: {
inherit
(import "${sources.proxmox-nixos}/pkgs" {
craneLib = pkgs.callPackage "${sources.crane}/lib" { };
# breaks from https://github.com/NixOS/nixpkgs/commit/06b354eb2dc535c57e9b4caaa16d79168f117a26,
# which updates libvncserver to 0.9.15, which was not yet patched at https://git.proxmox.com/?p=vncterm.git.
inherit pkgs;
# not so picky about version for our purposes
pkgs-unstable = pkgs;
})
proxmox-ve
pve-ha-manager
;
};
in
pkgs.testers.runNixOSTest {
node.specialArgs = {
inherit
sources
pkgs
;
};
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -0,0 +1,113 @@
{
lib,
pkgs,
sources,
...
}:
let
inherit (pkgs) system;
deployment-config = {
inherit (import ./constants.nix) pathToRoot;
nodeName = "pve";
targetSystem = system;
sshOpts = [ ];
proxmox-user = "root@pam";
proxmox-password = "mytestpw";
node-name = "pve";
vm-names = [ "test14" ];
};
# FIXME: generate the image `nixos-generate` used to produce, but for a desired `-c configuration.nix` rather than the current generic one
deployment =
(import ../common/data-model.nix {
inherit system;
config = deployment-config;
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."bash-proxmox-deployment".bash-proxmox-host;
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-proxmox/run.sh
];
nodes.pve =
{ sources, ... }:
{
imports = [
"${sources.proxmox-nixos}/modules/proxmox-ve"
];
users.users.root = {
password = "mytestpw";
hashedPasswordFile = lib.mkForce null;
};
services.proxmox-ve = {
enable = true;
ipAddress = "192.168.1.1";
vms = {
myvm1 = {
vmid = 100;
memory = 1024;
cores = 1;
sockets = 1;
kvm = true;
scsi = [ { file = "local:16"; } ];
};
};
};
virtualisation = {
diskSize = 2 * 1024;
memorySize = 2048;
};
};
nodes.deployer =
{ ... }:
{
nix.nixPath = [
(lib.concatStringsSep ":" (lib.mapAttrsToList (k: v: k + "=" + v) sources))
];
environment.systemPackages = [
deployment.run
];
# needed only when building from deployer
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
system.extraDependencies = [
pkgs.gnu-config
pkgs.byacc
pkgs.stdenv
pkgs.stdenvNoCC
sources.nixpkgs
pkgs.vte
];
};
extraTestScript = ''
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
pve.succeed("mkdir -p /run/pve")
assert "Proxmox" in pve.succeed("curl -s -i -k https://localhost:8006")
# pve.succeed("pvesh get /nodes && exit 1")
# pve.succeed("pvesh set /access/password --userid root@pam --password mypwdlol --confirmation-password mytestpw 1>&2")
# pve.succeed("curl -s -i -k -d '{\"userid\":\"root@pam\",\"password\":\"mypwdhaha\",\"confirmation-password\":\"mypwdlol\"}' -X PUT https://localhost:8006/api2/json/access/password 1>&2")
with subtest("Run the deployment"):
deployer.succeed("""
${lib.getExe deployment.run}
""")
# target.succeed("su - operator -c hello 1>&2")
'';
}

View file

@@ -5,9 +5,10 @@
}:
let
inherit (pkgs) system;
nodeName = "ssh";
deployment-config = {
inherit nodeName;
inherit (import ./constants.nix) pathToRoot;
nodeName = "ssh";
targetSystem = system;
sshOpts = [ ];
};
@@ -58,9 +59,10 @@ in
ssh.fail("hello 1>&2")
with subtest("Run the deployment"):
deployer.succeed("""
output = deployer.fail("""
${lib.getExe deploy}
""")
assert "Timeout, server ${nodeName} not responding" in output
ssh.wait_for_unit("multi-user.target")
ssh.succeed("su - operator -c hello 1>&2")
'';

View file

@@ -0,0 +1,11 @@
{
targetMachines = [
"pve"
];
pathToRoot = builtins.path {
path = ../../..;
name = "root";
};
pathFromRoot = "/deployment/check/data-model-tf-proxmox";
enableAcme = true;
}

View file

@@ -0,0 +1,48 @@
{
inputs,
sources,
system,
}:
let
pkgs = import sources.nixpkgs-stable {
inherit system;
overlays = [ overlay ];
};
overlay = _: _: {
inherit
(import "${sources.proxmox-nixos}/pkgs" {
craneLib = pkgs.callPackage "${sources.crane}/lib" { };
# breaks from https://github.com/NixOS/nixpkgs/commit/06b354eb2dc535c57e9b4caaa16d79168f117a26,
# which updates libvncserver to 0.9.15, which was not yet patched at https://git.proxmox.com/?p=vncterm.git.
inherit pkgs;
# not so picky about version for our purposes
pkgs-unstable = pkgs;
})
proxmox-ve
pve-ha-manager
pve-qemu
;
};
in
pkgs.testers.runNixOSTest {
node.specialArgs = {
inherit
sources
pkgs
;
};
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
enableAcme
;
}

View file

@@ -0,0 +1,117 @@
{
lib,
pkgs,
sources,
...
}:
let
inherit (pkgs) system;
deployment-config = {
inherit (import ./constants.nix) pathToRoot;
nodeName = "pve";
targetSystem = system;
sshOpts = [ ];
proxmox-user = "root@pam";
proxmox-password = "mytestpw";
node-name = "pve";
};
# FIXME: generate the image `nixos-generate` used to produce, but for a desired `-c configuration.nix` rather than the current generic one
deployment =
(import ../common/data-model.nix {
inherit system;
config = deployment-config;
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."tf-proxmox-deployment".tf-proxmox-host;
# tracking non-tarball downloads still seems unsupported in npins:
# https://github.com/andir/npins/issues/163
in
{
_class = "nixosTest";
imports = [
../common/data-model-options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-proxmox/run.sh
];
nodes.pve =
{ sources, ... }:
{
imports = [
"${sources.proxmox-nixos}/modules/proxmox-ve"
];
users.users.root = {
password = "mytestpw";
hashedPasswordFile = lib.mkForce null;
};
services.proxmox-ve = {
enable = true;
ipAddress = "192.168.1.1";
vms = {
myvm1 = {
vmid = 100;
memory = 1024;
cores = 1;
sockets = 1;
kvm = true;
scsi = [ { file = "local:16"; } ];
};
};
};
virtualisation = {
diskSize = 2 * 1024;
memorySize = 2048;
};
};
nodes.deployer =
{ ... }:
{
nix.nixPath = [
(lib.concatStringsSep ":" (lib.mapAttrsToList (k: v: k + "=" + v) sources))
];
environment.systemPackages = [
deployment.run
];
# needed only when building from deployer
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
system.extraDependencies = [
pkgs.ubootQemuX86
pkgs.ubootQemuX86.inputDerivation
pkgs.pve-qemu
pkgs.pve-qemu.inputDerivation
pkgs.gnu-config
pkgs.byacc
pkgs.stdenv
pkgs.stdenvNoCC
sources.nixpkgs
pkgs.vte
];
};
extraTestScript = ''
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
pve.succeed("mkdir -p /run/pve")
assert "Proxmox" in pve.succeed("curl -s -i -k https://localhost:8006")
# pve.succeed("pvesh set /access/password --userid root@pam --password mypwdlol --confirmation-password mytestpw 1>&2")
# pve.succeed("curl -s -i -k -d '{\"userid\":\"root@pam\",\"password\":\"mypwdhaha\",\"confirmation-password\":\"mypwdlol\"}' -X PUT https://localhost:8006/api2/json/access/password 1>&2")
with subtest("Run the deployment"):
deployer.succeed("""
${lib.getExe deployment.run}
""")
# target.succeed("su - operator -c hello 1>&2")
'';
}

View file

@@ -5,9 +5,10 @@
}:
let
inherit (pkgs) system;
nodeName = "target";
deployment-config = {
inherit nodeName;
inherit (import ./constants.nix) pathToRoot;
nodeName = "target";
targetSystem = system;
sshOpts = [ ];
};
@@ -51,9 +52,10 @@ in
target.fail("hello 1>&2")
with subtest("Run the deployment"):
deployer.succeed("""
output = deployer.fail("""
${lib.getExe deploy}
""")
assert "Timeout, server ${nodeName} not responding" in output
target.wait_for_unit("multi-user.target")
target.succeed("su - operator -c hello 1>&2")
'';

View file

@@ -1,5 +1,4 @@
{
inputs,
lib,
hostPkgs,
config,
@@ -151,17 +150,6 @@ in
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;

View file

@@ -15,7 +15,7 @@ in
{
name = "proxmox-basic";
nodes.mypve =
nodes.pve =
{ sources, ... }:
{
imports = [
@@ -44,41 +44,41 @@
};
testScript = ''
machine.start()
machine.wait_for_unit("pveproxy.service")
assert "running" in machine.succeed("pveproxy status")
pve.start()
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
# Copy Iso
machine.succeed("mkdir -p /var/lib/vz/template/iso/")
machine.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
pve.succeed("mkdir -p /var/lib/vz/template/iso/")
pve.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
# Declarative VM creation
machine.wait_for_unit("multi-user.target")
machine.succeed("qm stop 100 --timeout 0")
pve.wait_for_unit("multi-user.target")
pve.succeed("qm stop 100 --timeout 0")
# Seabios VM creation
machine.succeed(
pve.succeed(
"qm create 101 --kvm 0 --bios seabios -cdrom local:iso/minimal.iso",
"qm start 101",
"qm stop 101 --timeout 0"
)
# Legacy ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 102 --kvm 0 --bios ovmf -cdrom local:iso/minimal.iso",
"qm start 102",
"qm stop 102 --timeout 0"
)
# UEFI ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 103 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m -cdrom local:iso/minimal.iso",
"qm start 103",
"qm stop 103 --timeout 0"
)
# UEFI ovmf vm creation with secure boot
machine.succeed(
pve.succeed(
"qm create 104 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m,pre-enrolled-keys=1 -cdrom local:iso/minimal.iso",
"qm start 104",
"qm stop 104 --timeout 0"

View file

@@ -276,6 +276,313 @@ let
};
});
};
bash-proxmox-host = mkOption {
description = "A bash deployment by SSH to create or update a NixOS VM in ProxmoX.";
type = submodule (bash-proxmox-host: {
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
# TODO: add proxmox info
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
description = "The arguments with which to call the module to obtain the NixOS configuration.";
type = types.attrs;
};
deployment-name = mkOption {
description = "The name of the deployment for which to obtain the NixOS configuration.";
type = types.str;
};
root-path = mkOption {
description = "The path to the root of the repository.";
type = types.path;
};
proxmox-host = mkOption {
description = "The host of the ProxmoX instance to use.";
type = types.str;
default = "192.168.51.81";
};
vm-names = mkOption {
description = "The names of VMs to provision.";
type = types.listOf types.str;
};
proxmox-user = mkOption {
description = "The ProxmoX user to use.";
type = types.str;
default = "root@pam";
};
# TODO: is sensitivity here handled properly?
proxmox-password = mkOption {
description = "The ProxmoX password to use.";
type = types.str;
};
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
run = mkOption {
type = types.package;
# error: The option `.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
deployment-type = "bash-proxmox-host";
inherit (bash-proxmox-host.config)
system
module
args
deployment-name
root-path
node-name
proxmox-host
proxmox-user
proxmox-password
vm-names
;
nixos_conf = writeConfig {
inherit
system
module
args
deployment-name
root-path
deployment-type
;
};
in
pkgs.writers.writeBashBin "provision-proxmox.sh"
(withPackages [
pkgs.httpie
pkgs.jq
])
''
bash ./infra/proxmox-remove.sh \
--api-url "https://${proxmox-host}:8006/api2/json" \
--username "${proxmox-user}" \
--password "${proxmox-password}" \
--node "${node-name}" \
7014
# ^ hardcoded ID of test14
# ${lib.concatStringsSep " " vm-names}
bash ./infra/proxmox-provision.sh \
--api-url "https://${proxmox-host}:8006/api2/json" \
--username "${proxmox-user}" \
--password "${proxmox-password}" \
--node "${node-name}" \
${
# lib.concatStringsSep " " vm-names
lib.concatStringsSep " " (lib.lists.map (k: "${k}:${nixos_conf}") vm-names)
}
# ${lib.concatStringsSep " " vm-names}
'';
};
};
});
};
tf-proxmox-host = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
# type = submodule (tf-host: {
type = submodule (
tf-host:
let
raw = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/raw.nix";
formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/raw-efi.nix";
formatAttr = "raw";
fileExtension = ".img";
};
format = raw;
# qcow = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow.nix";
# formatAttr = "qcow";
# fileExtension = ".qcow2";
# };
# format = qcow;
# qcow-efi = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow-efi.nix";
# formatAttr = "qcow-efi";
# fileExtension = ".qcow2";
# };
# format = qcow-efi;
inherit (format) formatConfig fileExtension formatAttr;
in
{
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
ssh = host-ssh;
# TODO: add proxmox info
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
description = "The arguments with which to call the module to obtain the NixOS configuration.";
type = types.attrs;
};
deployment-name = mkOption {
description = "The name of the deployment for which to obtain the NixOS configuration.";
type = types.str;
};
root-path = mkOption {
description = "The path to the root of the repository.";
type = types.path;
};
proxmox-user = mkOption {
description = "The ProxmoX user to use.";
type = types.str;
default = "root@pam";
};
# TODO: is sensitivity here handled properly?
proxmox-password = mkOption {
description = "The ProxmoX password to use.";
type = types.str;
};
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
run = mkOption {
type = types.package;
# error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
inherit (tf-host.config)
system
ssh
module
args
deployment-name
root-path
proxmox-user
proxmox-password
node-name
;
inherit (ssh)
host
username
key-file
sshOpts
;
deployment-type = "tf-proxmox-host";
nixos_conf = writeConfig {
inherit
system
module
args
deployment-name
root-path
deployment-type
;
};
# machine = import nixos_conf;
machine = import ./nixos.nix {
inherit sources system;
configuration = tf-host.config.nixos-configuration;
# configuration = { ... }: {
# imports = [
# tf-host.config.nixos-configuration
# ../infra/common/nixos/repart.nix
# ];
# };
};
# inherit (machine.config.boot.uki) name;
name = "monkey";
# # systemd-repart
# better for cross-compilation, worse for pre-/post-processing, doesn't support MBR: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
# raw = "${machine.config.system.build.image}/${name}.raw";
# disko
# worse for cross-compilation, better for pre-/post-processing, needs manual `imageSize`, random failures: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
raw = "${machine.config.system.build.diskoImages}/main.raw";
# # nixos-generators: note it can straight-up do qcow2 as well, if we settle for nixos-generators
# # `mount: /run/nixos-etc-metadata.J3iARWBtna: failed to setup loop device for /nix/store/14ka2bmx6lcnyr8ah2yl787sqcgxz5ni-etc-metadata.erofs.`
# # [`Error: Failed to parse os-release`](https://github.com/NixOS/nixpkgs/blob/5b1861820a3bc4ef2f60b0afcffb71ea43f5d000/pkgs/by-name/sw/switch-to-configuration-ng/src/src/main.rs#L151)
# raw = let
# # TODO parameterize things to let this flow into the terraform
# # btw qcow can be made by nixos-generators (qcow, qcow-efi) or by `image.repart`
# # wait, so i generate an image for the nixos config from the data model? how would i then propagate that to deploy?
# gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system formatConfig;
# inherit (sources) nixpkgs;
# configuration = tf-host.config.nixos-configuration;
# };
# in
# "${gen.config.system.build.${formatAttr}}/nixos${fileExtension}";
environment = {
key_file = key-file;
ssh_opts = sshOpts;
inherit
host
nixos_conf
;
proxmox_user = proxmox-user;
proxmox_password = proxmox-password;
ssh_user = username;
node_name = node-name;
};
tf-env = pkgs.callPackage ./run/tf-proxmox/tf-env.nix { };
proxmox-host = "192.168.51.81"; # root@fediversity-proxmox
vm-names = [ "test14" ];
vm_name = "test14";
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox.sh"
(withPackages [
pkgs.jq
pkgs.qemu
pkgs.nixos-generators
pkgs.httpie
(pkgs.callPackage ./run/tf-proxmox/tf.nix { inherit sources; })
])
''
set -xe
# bash ./infra/proxmox-remove.sh \
# --api-url "https://${proxmox-host}:8006/api2/json" \
# --username "${proxmox-user}" \
# --password "${proxmox-password}" \
# --node "${node-name}" \
# 7014
# # ^ hardcoded ID of test14
# # ${lib.concatStringsSep " " vm-names}
# TODO after install: $nix_host_keys
# cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
# chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
# nixos-generate carries the burden of building revisions, while systemd-repart handles partitioning
# .qcow2 is around half the size of .raw, and supports backups, though apparently at some cost in performance
qemu-img convert -f raw -O qcow2 -C "${raw}" /tmp/${name}.qcow2
ls -l ${raw}
ls -l /tmp/${name}.qcow2
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
TF_VAR_image=/tmp/${name}.qcow2 \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox/run.sh
'';
# # don't really wanna deal with having to do versioned updates for now
# qemu-img convert -f raw -O qcow2 -C "${machine.config.system.build.image}/${name}.raw" /tmp/${name}.qcow2
};
};
}
);
};
};
in
{

View file

@@ -41,6 +41,14 @@
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-model-bash-proxmox = import ./check/data-model-bash-proxmox {
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./check/data-model-tf-proxmox {
inherit inputs sources system;
};
};
};
}

View file

@@ -12,8 +12,8 @@ let
modules = [ configuration ];
};
in
{
inherit (eval) pkgs config options;
system = eval.config.system.build.toplevel;
inherit (eval.config.system.build) vm vmWithBootLoader;
}
{
inherit (eval) pkgs config options;
system = eval.config.system.build.toplevel;
inherit (eval.config.system.build) vm vmWithBootLoader;
}
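
For illustration, a hedged sketch of consuming this helper the way the tf-proxmox data model does (the npins path and `./configuration.nix` are placeholders):

```
let
  machine = import ./nixos.nix {
    sources = import ../npins; # assumption: npins at the repository root
    system = "x86_64-linux";
    configuration = ./configuration.nix; # hypothetical NixOS module
  };
in
machine.system # the toplevel derivation exposed above
```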

View file

@@ -37,11 +37,4 @@ NIX_SSHOPTS="${sshOpts[*]}" nix-copy-closure --to "$destination" "$outPath" --gz
# shellcheck disable=SC2029
ssh "${sshOpts[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath"
# shellcheck disable=SC2029
output=$(ssh -o "ConnectTimeout=1" -o "ServerAliveInterval=1" "${sshOpts[@]}" "$destination" "nohup $outPath/bin/switch-to-configuration switch &" 2>&1) || echo "status code: $?"
echo "output: $output"
if [[ $output != *"Timeout, server $host not responding"* ]]; then
echo "non-timeout error: $output"
exit 1
else
exit 0
fi
ssh -o "ConnectTimeout=1" -o "ServerAliveInterval=1" "${sshOpts[@]}" "$destination" "nohup $outPath/bin/switch-to-configuration switch &" 2>&1

View file

@@ -0,0 +1,337 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "= 0.81.0"
}
}
}
locals {
dump_name = "qemu-nixos-fediversity-${var.category}.qcow2"
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
endpoint = "https://${var.host}:8006/"
insecure = true
# used only for files and creating custom disks
ssh {
agent = true
# uncomment and configure if using api_token instead of password
username = "root"
# node {
# name = "${var.node_name}"
# address = "${var.host}"
# # port = 22
# }
}
# # Choose one authentication method:
# api_token = var.virtual_environment_api_token
# # OR
username = var.proxmox_user
password = var.proxmox_password
# # OR
# auth_ticket = var.virtual_environment_auth_ticket
# csrf_prevention_token = var.virtual_environment_csrf_prevention_token
}
# # FIXME move to host
# # FIXME add proxmox
# data "external" "base-hash" {
# program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ${path.module}/../common/nixos/base.nix)\\\"}\""]
# }
# # hash of our code directory, used to trigger re-deploy
# # FIXME calculate separately to reduce false positives
# data "external" "hash" {
# program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
# }
# FIXME handle known-hosts in TF state
# FIXME move to host
# FIXME switch to base image shared between jobs as upload seems a bottleneck? e.g. by:
# - recursive TF
# - hash in name over overwrite
# won't notice file changes: https://github.com/bpg/terraform-provider-proxmox/issues/677
resource "proxmox_virtual_environment_file" "upload" {
# # https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts
# timeouts {
# create = "60m"
# }
# content_type - (Optional) The content type. If not specified, the content type will be inferred from the file extension. Valid values are:
# backup (allowed extensions: .vzdump, .tar.gz, .tar.xz, tar.zst)
# iso (allowed extensions: .iso, .img)
# snippets (allowed extensions: any)
# import (allowed extensions: .raw, .qcow2, .vmdk)
# vztmpl (allowed extensions: .tar.gz, .tar.xz, tar.zst)
# content_type = "backup"
content_type = "import"
# https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
# PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
# that UI action also adds it in `/etc/pve/storage.cfg`
datastore_id = "local"
# datastore_id = "local-lvm"
# datastore_id = "backup"
node_name = var.node_name
overwrite = true
timeout_upload = 3600
# timeout_upload = 1
source_file {
# path = "/tmp/proxmox-image/${local.dump_name}"
path = var.image
file_name = local.dump_name
}
}
# resource "proxmox_virtual_environment_download_file" "latest_ubuntu_22_jammy_qcow2_img" {
# content_type = "import"
# datastore_id = "local"
# node_name = var.node_name
# url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# # need to rename the file to *.qcow2 to indicate the actual file format for import
# file_name = "jammy-server-cloudimg-amd64.qcow2"
# }
resource "proxmox_virtual_environment_vm" "nix_vm" {
lifecycle {
# wait, would this not propagate any changes to this property,
# or just defer syncing when only this changed?
ignore_changes = [
disk["import_from"],
]
}
node_name = var.node_name
pool_id = var.pool_id
description = var.description
started = true
# https://wiki.nixos.org/wiki/Virt-manager#Guest_Agent
agent {
enabled = true
# timeout = "5m"
timeout = "40s"
trim = true
}
cpu {
type = "x86-64-v2-AES"
cores = var.cores
sockets = var.sockets
numa = true
}
memory {
dedicated = var.memory
}
disk {
# datastore_id = "linstor_storage"
datastore_id = "local"
file_format = "qcow2"
interface = "scsi0"
discard = "on"
iothread = true
size = var.disk_size
ssd = true
backup = false
cache = "none"
# FIXME make the provider allow this as a distinct block to allow making this depend on VM id?
# import_from = "local:import/${proxmox_virtual_environment_vm.nix_vm.vm_id}-${local.dump_name}" # bogus import name to test if it would accept self-referential values here # may not refer to itself
# import_from = "local:import/${local.dump_name}"
import_from = proxmox_virtual_environment_file.upload.id
# import_from = proxmox_virtual_environment_download_file.latest_ubuntu_22_jammy_qcow2_img.id
# import_from = "local:import/jammy-server-cloudimg-amd64.qcow2"
}
efi_disk {
# datastore_id = "linstor_storage"
datastore_id = "local"
file_format = "qcow2"
type = "4m"
}
network_device {
model = "virtio"
bridge = "ovsbr0"
vlan_id = 1305
}
operating_system {
type = "l26"
}
scsi_hardware = "virtio-scsi-single"
bios = "ovmf"
# # used only for cloud-init
# initialization {
# ip_config {
# ipv4 {
# gateway = "eth0"
# address = "95.215.187.${proxmox_virtual_environment_vm.nix_vm.vm_id}" # error: self-referential block
# }
# ipv6 {
# gateway = "eth0"
# address = "2a00:51c0:13:1305::${proxmox_virtual_environment_vm.nix_vm.vm_id}"
# }
# }
# }
}
# FIXME expose (and handle thru) [`exec`](https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/qemu/{vmid}/agent/exec) endpoint in proxmox TF provider? wait, what command would i use it for?: https://github.com/bpg/terraform-provider-proxmox/issues/1576
module "nixos-rebuild" {
source = "../tf-single-host"
nixos_conf = var.nixos_conf
# username = var.ssh_user # refers to the proxmox ssh user, not the VM one
username = "root"
# host = proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1][0] # does not exist (in time)
host = "95.215.187.${proxmox_virtual_environment_vm.nix_vm.vm_id}"
# host = "2a00:51c0:13:1305::${proxmox_virtual_environment_vm.nix_vm.vm_id}"
# host = "95.215.187.101"
# host = "2a00:51c0:13:1305::101"
key_file = var.key_file
ssh_opts = var.ssh_opts
}
# vm output: {
# "acpi" = true
# "agent" = tolist([
# {
# "enabled" = true
# "timeout" = "15m"
# "trim" = false
# "type" = "virtio"
# },
# ])
# "amd_sev" = tolist([])
# "audio_device" = tolist([])
# "bios" = "ovmf"
# "boot_order" = tolist(null) /* of string */
# "cdrom" = tolist([])
# "clone" = tolist([])
# "cpu" = tolist([
# {
# "affinity" = ""
# "architecture" = ""
# "cores" = 1
# "flags" = tolist(null) /* of string */
# "hotplugged" = 0
# "limit" = 0
# "numa" = true
# "sockets" = 1
# "type" = "x86-64-v2-AES"
# "units" = 1024
# },
# ])
# "description" = ""
# "disk" = tolist([
# {
# "aio" = "io_uring"
# "backup" = false
# "cache" = "none"
# "datastore_id" = "local"
# "discard" = "on"
# "file_format" = "qcow2"
# "file_id" = ""
# "import_from" = "local:import/qemu-nixos-fediversity-test.qcow2"
# "interface" = "scsi0"
# "iothread" = true
# "path_in_datastore" = "101/vm-101-disk-1.qcow2"
# "replicate" = true
# "serial" = ""
# "size" = 32
# "speed" = tolist([])
# "ssd" = true
# },
# ])
# "efi_disk" = tolist([
# {
# "datastore_id" = "local"
# "file_format" = "qcow2"
# "pre_enrolled_keys" = false
# "type" = "4m"
# },
# ])
# "hook_script_file_id" = tostring(null)
# "hostpci" = tolist([])
# "id" = "101"
# "initialization" = tolist([])
# "ipv4_addresses" = tolist([])
# "ipv6_addresses" = tolist([])
# "keyboard_layout" = "en-us"
# "kvm_arguments" = ""
# "mac_addresses" = tolist([])
# "machine" = ""
# "memory" = tolist([
# {
# "dedicated" = 2048
# "floating" = 0
# "hugepages" = ""
# "keep_hugepages" = false
# "shared" = 0
# },
# ])
# "migrate" = false
# "name" = ""
# "network_device" = tolist([
# {
# "bridge" = "vnet1306"
# "disconnected" = false
# "enabled" = true
# "firewall" = false
# "mac_address" = "BC:24:11:DE:E5:A8"
# "model" = "virtio"
# "mtu" = 0
# "queues" = 0
# "rate_limit" = 0
# "trunks" = ""
# "vlan_id" = 0
# },
# ])
# "network_interface_names" = tolist([])
# "node_name" = "node051"
# "numa" = tolist([])
# "on_boot" = true
# "operating_system" = tolist([
# {
# "type" = "l26"
# },
# ])
# "pool_id" = "Fediversity"
# "protection" = false
# "reboot" = false
# "reboot_after_update" = true
# "rng" = tolist([])
# "scsi_hardware" = "virtio-scsi-single"
# "serial_device" = tolist([])
# "smbios" = tolist([])
# "started" = true
# "startup" = tolist([])
# "stop_on_destroy" = false
# "tablet_device" = true
# "tags" = tolist(null) /* of string */
# "template" = false
# "timeout_clone" = 1800
# "timeout_create" = 1800
# "timeout_migrate" = 1800
# "timeout_move_disk" = 1800
# "timeout_reboot" = 1800
# "timeout_shutdown_vm" = 1800
# "timeout_start_vm" = 1800
# "timeout_stop_vm" = 300
# "tpm_state" = tolist([])
# "usb" = tolist([])
# "vga" = tolist([])
# "virtiofs" = tolist([])
# "vm_id" = 101
# "watchdog" = tolist([])
# }
output "ips" {
value = proxmox_virtual_environment_vm.nix_vm
}

View file

@@ -0,0 +1,11 @@
#! /usr/bin/env bash
set -xeuo pipefail
declare tf_env
export TF_LOG=info
# # debug logging explodes RAM use on upload and logs file content, causing timeouts
# export TF_LOG=debug
cd "${tf_env}/deployment/run/tf-proxmox"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -lock=false -input=false -parallelism=1

View file

@@ -0,0 +1,33 @@
{
lib,
pkgs,
sources ? import ../../../npins,
}:
pkgs.stdenv.mkDerivation {
name = "tf-repo";
src =
with lib.fileset;
toSource {
root = ../../../.;
# don't copy ignored files
fileset = intersection (gitTracked ../../../.) ../../../.;
};
buildInputs = [
(pkgs.callPackage ./tf.nix { inherit sources; })
(pkgs.callPackage ../tf-setup.nix { inherit sources; })
];
buildPhase = ''
runHook preBuild
for category in deployment/run/tf-single-host deployment/run/tf-proxmox; do
pushd "$category"
source setup
popd
done
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -r . $out
runHook postInstall
'';
}
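
The data model above wires this derivation into the deploy script roughly as follows (a condensed sketch of the `run` option of `tf-proxmox-host`):

```
let
  tf-env = pkgs.callPackage ./run/tf-proxmox/tf-env.nix { };
in
pkgs.writers.writeBashBin "deploy-tf-proxmox.sh" ''
  tf_env=${tf-env} bash ./deployment/run/tf-proxmox/run.sh
''
```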

View file

@@ -0,0 +1,27 @@
# FIXME: use overlays so this gets imported just once?
{
pkgs,
sources ? import ../../../npins,
...
}:
let
mkProvider =
args:
pkgs.terraform-providers.mkProvider (
{ mkProviderFetcher = { repo, ... }: sources.${repo}; } // args
);
in
pkgs.opentofu.withPlugins (p: [
p.external
(mkProvider {
owner = "bpg";
repo = "terraform-provider-proxmox";
# 0.82+ need go 1.25
rev = "v0.81.0";
spdx = "MPL-2.0";
hash = null;
vendorHash = "sha256-cpei22LkKqohlE76CQcIL5d7p+BjNcD6UQ8dl0WXUOc=";
homepage = "https://registry.terraform.io/providers/bpg/proxmox";
provider-source-address = "registry.opentofu.org/bpg/proxmox";
})
])

View file

@@ -0,0 +1,95 @@
variable "nixos_conf" {
description = "The path to the NixOS configuration to deploy."
type = string
}
variable "ssh_user" {
description = "the SSH user to use"
type = string
default = "root"
}
variable "proxmox_user" {
description = <<EOT
The Proxmox user to use. Needs privileges:
- `VM.Monitor`
- ...
EOT
type = string
default = "root@pam"
}
variable "proxmox_password" {
description = "the ProxmoX password to use"
type = string
sensitive = true
}
variable "host" {
description = "the host of the ProxmoX Virtual Environment."
type = string
}
variable "node_name" {
description = "the name of the ProxmoX node to use."
type = string
}
variable "key_file" {
description = "path to the user's SSH private key"
type = string
}
variable "ssh_opts" {
description = "Extra SSH options (`-o`) to use."
type = string
default = "[]"
}
variable "image" {
# description = ""
type = string
}
#########################################
variable "category" {
type = string
description = "Category to be used in naming the base image."
default = "test"
}
variable "description" {
type = string
default = ""
}
variable "sockets" {
type = number
description = "The number of sockets of the VM."
default = 1
}
variable "cores" {
type = number
description = "The number of cores of the VM."
default = 1
}
variable "memory" {
type = number
description = "The amount of memory of the VM in MiB."
default = 2048
}
variable "disk_size" {
type = number
description = "The amount of disk of the VM in GiB."
default = 32
}
variable "pool_id" {
type = string
description = "The identifier for a pool to assign the virtual machine to."
default = "Fediversity"
}

View file

@@ -14,7 +14,7 @@ pkgs.stdenv.mkDerivation {
};
buildInputs = [
(pkgs.callPackage ./tf.nix { })
(pkgs.callPackage ./setup.nix { inherit sources; })
(pkgs.callPackage ../tf-setup.nix { inherit sources; })
];
buildPhase = ''
runHook preBuild

View file

@@ -52,6 +52,81 @@
trim-trailing-whitespace.enable = true;
shellcheck.enable = true;
};
# https://192.168.51.81:8006/#v1:0:=node%2Fnode051:4:::::8::=apitokens
# apps.default = {
# type = "app";
# program = pkgs.writers.writeBashBin "provision-proxmox.sh"
# {
# makeWrapperArgs = [
# "--prefix"
# "PATH"
# ":"
# "${lib.makeBinPath [
# pkgs.jq
# pkgs.httpie
# ]}"
# ];
# }
# ''
# sh ./infra/proxmox-remove.sh --api-url "https://192.168.51.81:8006/api2/json" --username "kiara@ProcoliX" --password "" 7014 # test14
# sh ./infra/proxmox-provision.sh --api-url "https://192.168.51.81:8006/api2/json" --username "kiara@ProcoliX" --password "" test14
# '';
# };
# api_token = "terraform@pve!provider=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
# kiara@ProcoliX!dsdfsfdsfd=30df234b-02f3-4ed9-b778-00d28ad3499c
# # iso: No bootable option or device was found
# apps.default = let
# inherit (pkgs) system;
# deployment =
# (import ./deployment/check/common/data-model.nix {
# inherit system;
# config = {
# targetSystem = system;
# nodeName = "192.168.51.81"; # root@fediversity-proxmox
# pathToRoot = builtins.path {
# path = ./.;
# name = "root";
# };
# sshOpts = [];
# proxmox-user = "kiara@ProcoliX";
# proxmox-password = "";
# node-name = "node051";
# vm-names = [ "test14" ];
# };
# })."bash-proxmox-deployment".bash-proxmox-host;
# in {
# type = "app";
# program = deployment.run;
# };
apps.default = let
inherit (pkgs) system;
deployment =
(import ./deployment/check/common/data-model.nix {
inherit system;
config = {
targetSystem = system;
nodeName = "192.168.51.81"; # root@fediversity-proxmox
pathToRoot = builtins.path {
path = ./.;
name = "root";
};
sshOpts = [];
proxmox-user = "kiara@ProcoliX";
proxmox-password = "";
node-name = "node051";
};
# opt not to pass `inputs`, as we could only pass serializable arguments through to its self-call
})."tf-proxmox-deployment".tf-proxmox-host;
in {
type = "app";
program = deployment.run;
};
};
}
);

View file

@@ -10,6 +10,7 @@ in
imports = [
./networking.nix
./users.nix
./repart.nix
];
time.timeZone = "Europe/Amsterdam";

View file

@@ -0,0 +1,192 @@
{
config,
pkgs,
lib,
modulesPath,
...
}:
{
imports = [
"${modulesPath}/image/repart.nix"
];
fileSystems = {
# "/" = {
# fsType = "tmpfs";
# options = [
# "size=20%"
# ];
# };
"/" =
let
partConf = config.image.repart.partitions."root".repartConfig;
in
{
device = "/dev/disk/by-partuuid/${partConf.UUID}";
fsType = partConf.Format;
};
# "/var" =
# let
# partConf = config.image.repart.partitions."var".repartConfig;
# in
# {
# device = "/dev/disk/by-partuuid/${partConf.UUID}";
# fsType = partConf.Format;
# };
"/boot" =
let
partConf = config.image.repart.partitions."esp".repartConfig;
in
{
device = "/dev/disk/by-partuuid/${partConf.UUID}";
fsType = partConf.Format;
};
# "/nix/store" =
# let
# partConf = config.image.repart.partitions."store".repartConfig;
# in
# {
# device = "/dev/disk/by-partlabel/${partConf.Label}";
# fsType = partConf.Format;
# };
};
boot.uki.name = "monkey";
# fileSystems."/".device = "/dev/disk/by-label/nixos";
# https://nixos.org/manual/nixos/stable/#sec-image-repart
# https://x86.lol/generic/2024/08/28/systemd-sysupdate.html
image.repart =
let
efiArch = pkgs.stdenv.hostPlatform.efiArch;
in
{
name = config.boot.uki.name;
# name = "image";
# split = true;
partitions = {
"esp" = {
# The contents to end up in the filesystem image.
contents = {
# "/EFI/BOOT/BOOTX64.EFI".source = "${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
"/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
"${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
"/EFI/Linux/${config.system.boot.loader.ukiFile}".source =
"${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
# https://man.archlinux.org/man/loader.conf.5
"/loader/entries/loader.conf".source = pkgs.writeText "loader.conf" ''
timeout 0
editor yes
default *
logLevel=debug
'';
# "/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
# timeout 0
# editor yes
# default *
# logLevel=debug
# '';
# nixos-*.conf
# "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
# title NixOS
# linux /EFI/nixos/kernel.efi
# initrd /EFI/nixos/initrd.efi
# options init=/nix/store/.../init root=LABEL=nixos
# '';
# systemd-boot configuration
"/loader/loader.conf".source = (
pkgs.writeText "$out" ''
timeout 3
''
);
};
# https://www.man7.org/linux//man-pages/man5/repart.d.5.html
repartConfig = {
Priority = 1;
Type = "esp";
MountPoint = "/boot";
Format = "vfat";
UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa";
SizeMinBytes = "500M";
SizeMaxBytes = "500M";
};
# repartConfig = {
# Type = "esp";
# UUID = "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"; # Well known
# Format = "vfat";
# SizeMinBytes = "256M";
# SplitName = "-";
# };
};
"root" = {
storePaths = [ config.system.build.toplevel ];
repartConfig = {
Priority = 2;
Type = "root";
Label = "nixos";
MountPoint = "/";
Format = "ext4";
UUID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb";
# populates the fs twice
Minimize = "guess";
# Minimize = "off";
# SizeMinBytes = "1G";
# SizeMaxBytes = "20G";
};
# "store" = {
# storePaths = [ config.system.build.toplevel ];
# stripNixStorePrefix = true;
# repartConfig = {
# Type = "linux-generic";
# Label = "store_${config.system.image.version}";
# Format = "squashfs";
# Minimize = "off";
# ReadOnly = "yes";
# SizeMinBytes = "1G";
# SizeMaxBytes = "1G";
# SplitName = "store";
# };
# };
# # Placeholder for the second installed Nix store.
# "store-empty" = {
# repartConfig = {
# Type = "linux-generic";
# Label = "_empty";
# Minimize = "off";
# SizeMinBytes = "1G";
# SizeMaxBytes = "1G";
# SplitName = "-";
# };
# };
# # Persistent storage
# "var" = {
# repartConfig = {
# Type = "var";
# UUID = "4d21b016-b534-45c2-a9fb-5c16e091fd2d"; # Well known
# Format = "xfs";
# Label = "nixos-persistent";
# Minimize = "off";
# # Has to be large enough to hold update files.
# SizeMinBytes = "2G";
# SizeMaxBytes = "2G";
# SplitName = "-";
# # Wiping this gives us a clean state.
# FactoryReset = "yes";
# };
# };
};
};
};
}
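
A sketch of building a raw disk image from this module (assuming `pkgs.nixos` as used in `nixosTest.nix` above; the file name follows `boot.uki.name`):

```
let
  pkgs = import <nixpkgs> { }; # assumption: any nixpkgs providing image/repart.nix
  machine = pkgs.nixos [
    ./repart.nix
    {
      nixpkgs.hostPlatform = "x86_64-linux";
      system.stateVersion = "25.05";
    }
  ];
in
# e.g. "${machine.config.system.build.image}/monkey.raw"
machine.config.system.build.image
```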

View file

@@ -1,4 +1,4 @@
{ ... }:
{ lib, ... }:
{
_class = "nixos";
@@ -11,6 +11,12 @@
# ];
boot = {
loader.grub = {
enable = true;
efiSupport = lib.mkDefault true;
efiInstallAsRemovable = lib.mkDefault true;
device = "nodev";
};
initrd = {
availableKernelModules = [
"ata_piix"
@@ -25,18 +31,19 @@
disko.devices.disk.main = {
device = "/dev/sda";
type = "disk";
imageSize = "20G"; # needed for image generation
content = {
type = "gpt";
partitions = {
MBR = {
priority = 0;
size = "1M";
type = "EF02";
};
# mbr = {
# priority = 0;
# size = "1M";
# type = "EF02";
# };
ESP = {
esp = {
priority = 1;
size = "500M";
type = "EF00";

View file

@@ -7,7 +7,7 @@ set -euC
## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051
node=node051
readonly tmpdir=/tmp/proxmox-provision-$RANDOM
mkdir $tmpdir
@@ -69,6 +69,7 @@ while [ $# -gt 0 ]; do
--api-url|--api_url) readonly api_url="$1"; shift ;;
--username) readonly username="$1"; shift ;;
--password) readonly password="$1"; shift ;;
--node) readonly node="$1"; shift ;;
--debug) debug=true ;;
@@ -172,11 +173,24 @@ grab_vm_options () {
printf 'Grabbing VM options for VM %s...\n' "$vm_name"
options=$(
nix --extra-experimental-features 'nix-command flakes' eval \
--impure --raw --expr "
builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions.$vm_name
" \
--log-format raw --quiet
# nix --extra-experimental-features 'nix-command flakes' eval \
# --impure --raw --expr "
# builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions.$vm_name
# " \
# --log-format raw --quiet
echo '
{
"description":"",
"sockets":1,
"cores":1,
"memory":2048,
"diskSize":32,
"name":"test14",
"vmId":7014,
"hostPublicKey":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHTbxDzq3xFeLvrXs6tyTE08o3CekYZmqFeGmkcHmf21",
"unsafeHostPrivateKey":"-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW\nQyNTUxOQAAACB028Q86t8RXi7617OrckxNPKNwnpGGZqhXhppHB5n9tQAAAIhfhYlCX4WJ\nQgAAAAtzc2gtZWQyNTUxOQAAACB028Q86t8RXi7617OrckxNPKNwnpGGZqhXhppHB5n9tQ\nAAAEAualLRodpovSzGAhza2OVvg5Yp8xv3A7xUNNbKsMTKSHTbxDzq3xFeLvrXs6tyTE08\no3CekYZmqFeGmkcHmf21AAAAAAECAwQF\n-----END OPENSSH PRIVATE KEY-----\n"
}
'
)
vm_id=$(echo "$options" | jq -r .vmId)
@@ -220,18 +234,45 @@ build_iso () {
nix_host_keys=
fi
nix --extra-experimental-features 'nix-command flakes' build \
# nix --extra-experimental-features 'nix-command flakes' build \
# --impure --expr "
# let flake = builtins.getFlake (builtins.toString ./.); in
# import ./infra/makeInstallerIso.nix {
# nixosConfiguration = flake.nixosConfigurations.$vm_name;
# # FIXME pass nixpkgs from npins
# $nix_host_keys
# }
# " \
# --log-format raw --quiet \
# --out-link "$tmpdir/installer-$vm_name"
# nix --extra-experimental-features 'nix-command' build \
# --impure --expr "
# import ./infra/makeInstallerIso.nix {
# # nixosConfiguration = $configuration;
# nixosConfiguration = import $configuration;
# $nix_host_keys
# }
# " \
# --log-format raw --quiet \
# --out-link "$tmpdir/installer-$vm_name"
# TODO after install: $nix_host_keys
# cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
# chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
nix --extra-experimental-features 'nix-command' build \
--impure --expr "
let flake = builtins.getFlake (builtins.toString ./.); in
import ./infra/makeInstallerIso.nix {
nixosConfiguration = flake.nixosConfigurations.$vm_name;
# FIXME pass nixpkgs from npins
$nix_host_keys
}
(import $configuration).config.system.build.image
" \
--log-format raw --quiet \
--out-link "$tmpdir/installer-$vm_name"
# ls "$tmpdir/installer-$vm_name"
# ls "$tmpdir/installer-$vm_name/image.raw"
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
die 'Something went wrong when building ISO for VM %s.
@@ -239,7 +280,8 @@ Check the Nix logs and fix things. Possibly there just is no NixOS configuration
"$vm_name"
fi
ln -sf "$(ls "$tmpdir/installer-$vm_name"/iso/nixos-*.iso)" "$tmpdir/installer-$vm_name.iso"
# ln -sf "$(ls "$tmpdir/installer-$vm_name"/iso/nixos-*.iso)" "$tmpdir/installer-$vm_name.iso"
ln -sf "$(ls "$tmpdir/installer-$vm_name"/image.raw)" "$tmpdir/installer-$vm_name.raw"
printf 'done building ISO for VM %s.\n' "$vm_name"
release_lock build
@@ -253,8 +295,8 @@ upload_iso () {
printf 'Uploading ISO for VM %s...\n' "$vm_name"
proxmox_sync POST "$api_url/nodes/$node/storage/local/upload" \
"filename@$tmpdir/installer-$vm_name.iso" \
content==iso
"filename@$tmpdir/installer-$vm_name.raw" \
content==raw
printf 'done uploading ISO for VM %s.\n' "$vm_name"
release_lock upload
@@ -266,7 +308,7 @@ upload_iso () {
remove_iso () {
printf 'Removing ISO for VM %s...\n' "$vm_name"
proxmox_sync DELETE "$api_url/nodes/$node/storage/local/content/local:iso/installer-$vm_name.iso"
proxmox_sync DELETE "$api_url/nodes/$node/storage/local/content/local:iso/installer-$vm_name.raw"
printf 'done removing ISO for VM %s.\n' "$vm_name"
}
@@ -284,7 +326,7 @@ create_vm () {
pool==Fediversity \
description=="$description" \
\
ide2=="local:iso/installer-$vm_name.iso,media=cdrom" \
ide2=="local:iso/installer-$vm_name.raw,media=cdrom" \
ostype==l26 \
\
bios==ovmf \
@@ -360,8 +402,13 @@ provision_vm () (
remove_iso
)
for vm_name in $vm_names; do
provision_vm "$vm_name" &
# FIXME make vm_names a thing from $vm_name to $configuration?
# for vm_name in $vm_names; do
# provision_vm "$vm_name" &
# done
for chunk in $vm_names; do
IFS=: read -r vm_name configuration <<< "$chunk"
provision_vm "$vm_name" "$configuration" &
done
nb_errors=0

View file

@@ -7,7 +7,7 @@ set -euC
## FIXME: There seems to be a problem with file upload where the task is
## registered to `node051` no matter what node we are actually uploading to? For
## now, let us just use `node051` everywhere.
readonly node=node051
node=node051
readonly tmpdir=/tmp/proxmox-remove-$RANDOM
mkdir $tmpdir
@@ -59,6 +59,7 @@ while [ $# -gt 0 ]; do
--api-url|--api_url) readonly api_url="$1"; shift ;;
--username) readonly username=$1; shift ;;
--password) readonly password=$1; shift ;;
--node) readonly node="$1"; shift ;;
-h|-\?|--help) help; exit 0 ;;

View file

@@ -189,9 +189,22 @@
},
"branch": "main",
"submodules": false,
"revision": "48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223.tar.gz",
"hash": "0606qcs8x1jwckd1ivf52rqdmi3lkn66iiqh6ghd4kqx0g2bw3nv"
"revision": "ce8768f43b4374287cd8b88d8fa9c0061e749d9a",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/ce8768f43b4374287cd8b88d8fa9c0061e749d9a.tar.gz",
"hash": "116zplxh64wxbq81wsfkmmssjs1l228kvhxfi9d434xd54k6vr35"
},
"terraform-provider-proxmox": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "bpg",
"repo": "terraform-provider-proxmox"
},
"branch": "main",
"submodules": false,
"revision": "891066821bf7993a5006b12a44c5b36dbdb852d8",
"url": "https://github.com/bpg/terraform-provider-proxmox/archive/891066821bf7993a5006b12a44c5b36dbdb852d8.tar.gz",
"hash": "0nh1b1mgkycjib2hfzgmq142kgklnnhk4rci4339pfgqfi1z841a"
}
},
"version": 5