Compare commits

...

112 commits

Author SHA1 Message Date
bd0562ca1f
restore disk size
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
6d1ab802ba
add fixme
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
2df71d1643
mv disko
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
e21317f345
rm flake invocation
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
739edf8db8
restore pm bash scripts
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
bbd31c9981
clean up iso tf
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
4a855e1699
make upload depend on hash
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
0cf620ca93
rename module to caller
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
e012d09a36
split proxmox upload/deploy
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
e6c963d273
reduce verbosity
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
66b9ffc4ff
restrict token roles
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
6ea3f9db90
use token
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
fbad0e0355
rm debug log comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
8c1ee75977
rename deployment method to prep for separation
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
0578e1caf7
rm unused options
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
441a04897c
split out TF proxmox data model
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
9a510b642e
limit disk size
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
d0f89a3764
clean some comments
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
0ad4b0b52a
clean out comments
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
f5c7e09216
use insecure for test only
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
f36a3de953
dedupe block
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
8c36da504c
lower memory
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:27 +02:00
b6d372b625
rename bridge
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
530ae7e3bb
fix tf syntax
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
4c07c18e42
pass ipv6
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
f191bd0c47
add no-op second invocation
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
871bcd490f
rm apparently unnecessary gateway stuff
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
6fd7196558
trigger update by codebase hash
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
5bd5ce64d4
add fixmes
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
cbe9898113
rm memory size
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
0b0c0424ed
output id
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
221d1490ad
print to stderr
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
c4abaf7812
todo wire credentials
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
1fddffcfdd
rm debug key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
378da5b634
base-hash
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
72329eafb4
rm comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
0f55f8ccd4
grub default false
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
ee61b72010
pkgs.qemu
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
1851614288
unprint await steps
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
20c2817dcf
rename await
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
e541157e98
fix null resource commands
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
d94d01ff93
make path in single host explicit
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
a3120e361d
working networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
bb987cc3cb
raise limits
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
1800b25f30
working networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
bc658d14d8
include await-ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
28933f3c48
execute await-ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
c308b264ad
pass proxmox user/pw thru env var as well, should fix usage from flake again
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
a2a57471c7
use jumphost for test
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
815fc20017
bootable disk
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
be4cd8cc04
rm broken async_pf fix
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
878650fe5c
rm old networking
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
f058b31cd7
users
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
f328969c9f
factor out await.ssh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
6eca9f80c4
add debugging key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
129c2793a0
fix verification
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
2bddec2902
increase timeout
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
41d412d176
add interface to ipv6 addresses
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
be109e4cc3
don't reinstall bootloader
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:26 +02:00
2ef2ca0b31
allow spaces in ssh opts
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:27:10 +02:00
6b6576fdd2
interface sata2 actually exposes initialization info on the vm
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
3296cb5cec
verbose wait
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
4926a0f646
comment ls echo
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
6983cc0cf7
check hello application
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
e1592207c8
restore jq package
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
7f254eb7c6
enable cloud-init
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
b8bf742667
ssh key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
fa29359c18
rm comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
6255c76164
propagate token
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
f640edc23d
set up proxmox
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
4504e99986
handle CA certificate
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
0510dcc97a
add packages
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:01 +02:00
dd7777007d
add fixmes
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
700b6a4949
prevent kernel panic
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
69bf201311
use json format for output only
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
af6f0bba5a
propagate ssh key
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
64402996f9
default timeouts
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
436367144a
propagate cd
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
12d5c7130b
use variables
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
0373720064
propagate ips
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
537852d0fc
rm ubuntu
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
8df4e58966
output: separate ips
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
8b8d27cd30
apply: json
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
d598d5cb81
un-generators
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
2b0a338148
propagate datastore id
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
506ca1392a
add pve-manager for pvesh
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
08026bb83f
password default
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
0eecc102cf
upstream nimbolus
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
c426896dee
rm output comment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
702f20a974
await SSH availability, resolving a race condition
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
992a937006
un-hardcode networking info
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
d25fa54f04
rm bash-proxmox
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:26:00 +02:00
a5b0f5aa6d
skip acme
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
f68e42d5c0
unlog steps
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
5ffc3be66b
clean up unused vms
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
4c7b2165a1
factor out TF setup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
b45c67cfb1
first time to get nix run pipeline to succeed including update step
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
b026da579c
hardcoded networking setup for nix run vm 101
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
207bb0558d
mv out acme logic
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
e1b6c577ec
start documenting needed proxmox user privileges
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
45ae7535eb
back to qcow over size difference
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
7af8049e64
some cleanup
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
027372d443
automated deployment to proxmox (in nix run) of vm booting to login
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
ce402f6301
simplify
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
593e901a8d
bootable vm by repart
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:56 +02:00
e169abd8c3
get nix run to build a vm bootable by qemu
```
cp /nix/store/.../nixos.img disk.raw
chmod 0644 disk.raw
qemu-system-x86_64 -enable-kvm -m 2048 -drive
if=virtio,file=./disk.raw,format=raw -bios "$(nix eval --impure --expr
'(import <nixpkgs> { }).OVMF.fd.outPath' | jq -r)/FV/OVMF.fd"
```

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:49 +02:00
9966b8a2b4
WIP: proxmox deployment
Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>

continued

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
2025-10-22 15:25:49 +02:00
5e6bbc8b9d support ssh option values containing spaces (#548)
Reviewed-on: fediversity/fediversity#548
2025-10-22 15:23:17 +02:00
18ccd900b2 make TF less chatty (#549)
Reviewed-on: fediversity/fediversity#549
2025-10-22 15:19:22 +02:00
9490612630 rename the caller option to the more apt caller (#547)
Reviewed-on: fediversity/fediversity#547
2025-10-22 15:14:10 +02:00
195bc476be rename pve node in test (#546)
Reviewed-on: fediversity/fediversity#546
2025-10-22 15:04:22 +02:00
575910f29f remove the (so far unused) model options (#545)
Reviewed-on: fediversity/fediversity#545
2025-10-22 15:02:16 +02:00
35 changed files with 1676 additions and 123 deletions

View file

@ -12,7 +12,7 @@ on:
jobs:
_checks:
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
needs: ["deployment-basic","deployment-cli","deployment-model-nixops4","deployment-model-ssh","deployment-model-tf","deployment-model-tf-proxmox","deployment-panel","nixops-deployment-providers-default","nixops-deployment-providers-fedi200","nixops-deployment-providers-fedi201","nixops-deployment-providers-forgejo-ci","nixops-deployment-providers-test","nixops-deployment-providers-vm02116","nixops-deployment-providers-vm02187","nixosConfigurations-fedi200","nixosConfigurations-fedi201","nixosConfigurations-forgejo-ci","nixosConfigurations-test01","nixosConfigurations-test02","nixosConfigurations-test03","nixosConfigurations-test04","nixosConfigurations-test05","nixosConfigurations-test06","nixosConfigurations-test11","nixosConfigurations-test12","nixosConfigurations-test13","nixosConfigurations-test14","nixosConfigurations-vm02116","nixosConfigurations-vm02187","panel","pre-commit","proxmox-basic","test-mastodon-service","test-peertube-service","vmOptions-fedi200","vmOptions-fedi201","vmOptions-test01","vmOptions-test02","vmOptions-test03","vmOptions-test04","vmOptions-test05","vmOptions-test06","vmOptions-test11","vmOptions-test12","vmOptions-test13","vmOptions-test14"]
runs-on: native
steps:
- run: true
@ -53,6 +53,12 @@ jobs:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf -vL
deployment-model-tf-proxmox:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model-tf-proxmox -L
deployment-panel:
runs-on: native
steps:

View file

@ -75,6 +75,7 @@ in
machine =
(pkgs.nixos [
./targetNode.nix
../../../infra/common/nixos/repart.nix
config.system.extraDependenciesFromModule
{
nixpkgs.hostPlatform = "x86_64-linux";

View file

@ -16,7 +16,8 @@ in
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
(modulesPath + "/../lib/testing/nixos-test-base.nix")
# FIXME needed for non-proxmox tests
# (modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
@ -42,8 +43,8 @@ in
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
# Test VMs don't have a bootloader by default.
boot.loader.grub.enable = lib.mkDefault false;
}
(mkIf config.enableAcme {

View file

@ -1,4 +1,5 @@
{
pkgs,
lib,
sources ? import ../../../npins,
...
@ -10,16 +11,103 @@
{
imports = [
../common/sharedOptions.nix
# tests need this, however outside tests this (and esp its import nixos-test-base) must not be used
../common/targetNode.nix
"${sources.nixpkgs}/nixos/modules/profiles/minimal.nix"
# "${nixpkgs}/nixos/modules/profiles/perlless.nix" # failed under disko
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# systemd-repart
# ../../../infra/common/nixos/repart.nix
# disko
"${sources.disko}/module.nix"
../../../infra/common/proxmox-qemu-vm.nix
];
users.users = environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
# # non-disko
# boot.loader.grub.enable = false;
# boot.loader.systemd-boot.enable = true;
# boot.loader.efi.efiSysMountPoint = "/boot";
# boot.loader.systemd-boot.edk2-uefi-shell.enable = true;
# boot.loader.efi.canTouchEfiVariables = true;
# # proxmox.qemuConf.bios == "ovmf";
# boot.growPartition = true;
# boot.loader.timeout = 1;
nixpkgs.hostPlatform = "x86_64-linux";
system.stateVersion = "25.05";
services.qemuGuest.enable = true;
systemd.services.qemu-guest-agent = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
};
services.openssh = {
enable = true;
settings.PasswordAuthentication = false;
};
networking = {
firewall.enable = false;
useDHCP = false;
usePredictableInterfaceNames = false;
useNetworkd = true;
nameservers = [
"95.215.185.6"
"95.215.185.7"
"2a00:51c0::5fd7:b906"
"2a00:51c0::5fd7:b907"
];
};
security.sudo.wheelNeedsPassword = false;
nix.settings.trusted-users = [ "@wheel" ];
services.cloud-init = {
enable = true;
network.enable = true;
};
users.mutableUsers = false;
users.users =
{
root = {
# password = "password"; # cannot log in
# hashedPassword = "$y$j9T$QoArNaV2VrjPhQ6BMG1AA.$uq8jw0.g.dJwIfepqipxzeUD1ochgUs8A5QmVe4qbJ6"; # cannot log in
hashedPasswordFile = builtins.toString (
pkgs.writeText "root-password" "$y$j9T$9g0NqdBsKvQ3ETOPPB0hW.$cIiG648jgA/eVqiCPJJZtI5JYiL6oODZtKI6.lCmJA/"
);
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFZsldWMEsajYysjYsEpNvMOjO4D8L21pTrfQS1T+Hfy"
];
};
# can log in
kiara = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
# cannot log in
operator = {
isNormalUser = true;
extraGroups = [ "wheel" ];
password = "password";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDHTIqF4CAylSxKPiSo5JOPuocn0y2z38wOSsQ1MUaZ2"
];
};
}
// environment.config.resources."operator-environment".login-shell.apply {
resources = lib.filterAttrs (_name: value: value ? login-shell) (
lib.concatMapAttrs (
k': req: lib.mapAttrs' (k: lib.nameValuePair "${k'}.${k}") req.resources
) requests
);
};
};
}

View file

@ -5,7 +5,6 @@
...
}@args:
let
self = "deployment/check/data-model-ssh/data-model.nix";
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
@ -40,7 +39,7 @@ in
key-file = null;
inherit sshOpts;
};
module = self;
caller = "deployment/check/data-model-ssh/data-model.nix";
inherit args deployment-name;
root-path = pathToRoot;
};

View file

@ -19,10 +19,6 @@ let
in
{
_class = "nixosTest";
imports = [
./options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
@ -31,7 +27,6 @@ in
../../run/ssh-single-host/run.sh
../../../npins/default.nix
../../../npins/sources.json
./options.nix
./constants.nix
];

View file

@ -1,15 +0,0 @@
{
lib,
...
}:
let
inherit (lib) mkOption types;
in
{
options = {
targetSystem = mkOption {
type = types.str;
description = "name of the host to deploy to";
};
};
}

View file

@ -0,0 +1,10 @@
{
targetMachines = [
"pve"
];
pathToRoot = builtins.path {
path = ../../..;
name = "root";
};
pathFromRoot = "/deployment/check/data-model-tf-proxmox";
}

View file

@ -0,0 +1,51 @@
{
inputs,
sources,
system,
}:
let
pkgs = import sources.nixpkgs-stable {
inherit system;
overlays = [ overlay ];
};
overlay = _: prev: {
terraform-backend =
prev.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/te/terraform-backend/package.nix"
{ };
inherit
(import "${sources.proxmox-nixos}/pkgs" {
craneLib = pkgs.callPackage "${sources.crane}/lib" { };
# breaks from https://github.com/NixOS/nixpkgs/commit/06b354eb2dc535c57e9b4caaa16d79168f117a26,
# which updates libvncserver to 0.9.15, which was not yet patched at https://git.proxmox.com/?p=vncterm.git.
inherit pkgs;
# not so picky about version for our purposes
pkgs-unstable = pkgs;
})
proxmox-ve
pve-manager
pve-ha-manager
pve-qemu
;
};
in
pkgs.testers.runNixOSTest {
node.specialArgs = {
inherit
sources
pkgs
;
};
imports = [
../../data-model.nix
../../function.nix
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix)
targetMachines
pathToRoot
pathFromRoot
;
}

View file

@ -0,0 +1,252 @@
{
lib,
pkgs,
sources,
...
}:
let
inherit (pkgs) system;
backendPort = builtins.toString 8080;
tfBackend = fragment: rec {
TF_HTTP_USERNAME = "basic";
TF_HTTP_PASSWORD = "fake-secret";
TF_HTTP_LOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_UNLOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/${fragment}";
};
# FIXME generate the image `nixos-generate` was to make, but now do it for a desired `-c configuration.nix` rather than whatever generic thing now
template-deployment =
(import ./setups/template.nix {
inherit sources system;
config = {
httpBackend = tfBackend "proxmox-test/upload";
nodeName = "pve";
targetSystem = system;
node-name = "pve";
imageDatastoreId = "local";
};
}).default.tf-proxmox-template;
vm-deployment =
(import ./setups/vm.nix {
inherit sources system;
config = {
httpBackend = tfBackend "proxmox-test/nixos";
inherit (import ./constants.nix) pathToRoot;
nodeName = "pve";
targetSystem = system;
sshOpts = [
"ProxyCommand=ssh -W %h:%p pve"
];
key-file = "/root/.ssh/id_ed25519";
node-name = "pve";
bridge = "br0";
vlanId = 0;
imageDatastoreId = "local";
vmDatastoreId = "local";
cdDatastoreId = "local";
ipv4Gateway = "192.168.10.1";
ipv4Address = "192.168.10.236/24";
ipv6Gateway = "";
ipv6Address = "";
# dynamically get the id from the template upload step
templateId = null;
};
}).default.tf-proxmox-vm;
in
{
_class = "nixosTest";
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-proxmox-template/run.sh
../../run/tf-proxmox-vm/run.sh
../../run/tf-proxmox-vm/await-ssh.sh
];
nodes.pve =
{ sources, ... }:
{
imports = [
"${sources.proxmox-nixos}/modules/proxmox-ve"
];
environment.systemPackages = [
pkgs.jq
pkgs.qemu
];
networking.firewall.enable = false;
networking.vlans = {
vlan0 = {
id = 0;
interface = "eth0";
};
};
networking.useDHCP = false;
networking = {
bridges.br0.interfaces = [ ];
interfaces.br0.ipv4.addresses = [
{
address = "192.168.10.1";
prefixLength = 24;
}
];
nat = {
enable = true;
internalInterfaces = [ "br0" ];
};
};
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
users.users.root = {
password = "mytestpw";
hashedPasswordFile = lib.mkForce null;
};
# https://github.com/SaumonNet/proxmox-nixos/blob/main/modules/proxmox-ve/default.nix
services.proxmox-ve = {
enable = true;
ipAddress = "192.168.1.1";
};
virtualisation = {
diskSize = 5 * 1024;
memorySize = 3 * 1024;
};
};
nodes.deployer =
{ ... }:
{
imports = [
../../modules/terraform-backend
];
networking.firewall.enable = false;
nix.nixPath = [
(lib.concatStringsSep ":" (lib.mapAttrsToList (k: v: k + "=" + v) sources))
];
environment.systemPackages = [
vm-deployment.run
template-deployment.run
pkgs.pve-manager
pkgs.openssl
pkgs.jq
(pkgs.callPackage ../../run/tf-proxmox-template/tf.nix { })
(pkgs.callPackage ../../run/tf-proxmox-vm/tf.nix { })
];
# needed only when building from deployer
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
];
};
system.extraDependencies = [
sources.disko
pkgs.ubootQemuX86
pkgs.ubootQemuX86.inputDerivation
pkgs.pve-qemu
pkgs.pve-qemu.inputDerivation
pkgs.gnu-config
pkgs.byacc
pkgs.stdenv
pkgs.stdenvNoCC
sources.nixpkgs
pkgs.vte
];
services.terraform-backend = {
enable = true;
settings = {
LISTEN_ADDR = ":${backendPort}";
KMS_KEY = "tsjxw9NjKUBUlzbTnD7orqIAdEmpGYRARvxD51jtY+o=";
};
};
};
extraTestScript = ''
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
pve.succeed("mkdir -p /run/pve")
assert "Proxmox" in pve.succeed("curl -s -i -k https://localhost:8006")
cert = pve.succeed("cat /etc/pve/pve-root-ca.pem").strip()
# set up proxmox
pve.succeed("""
pvesh create /pools --poolid Fediversity
pvesh set /storage/local --content "vztmpl,rootdir,backup,snippets,import,iso,images" 1>/dev/null
""")
template_token = pve.succeed("""
pvesh create /access/users/root@pam/token/template --output-format json | jq -r .value
pvesh set /access/acl --path "/" --token "root@pam!template" --roles "PVEDatastoreAdmin"
""").strip()
vm_token = pve.succeed("""
pvesh create /access/users/root@pam/token/vm --output-format json | jq -r .value
pvesh set /access/acl --path "/" --token "root@pam!vm" --roles "PVEVMAdmin PVEDatastoreAdmin PVESDNUser"
""").strip()
# skip indent for EOF
deployer.succeed(f"""
cat > /etc/ssl/certs/pve-root-ca.pem <<EOF
{cert}
EOF
mkdir -p /root/.ssh
cat > /root/.ssh/id_ed25519 <<EOF
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACBWbJXVjBLGo2MrI2LBKTbzDozuA/C9taU630EtU/h38gAAAJDAOy8uwDsv
LgAAAAtzc2gtZWQyNTUxOQAAACBWbJXVjBLGo2MrI2LBKTbzDozuA/C9taU630EtU/h38g
AAAECcF8xjLavgWePoVx45Euewsh6Kw07L6QDDy3WXFCn4bFZsldWMEsajYysjYsEpNvMO
jO4D8L21pTrfQS1T+HfyAAAAC2tpYXJhQG5peG9zAQI=
-----END OPENSSH PRIVATE KEY-----
EOF
chmod 600 /root/.ssh/id_ed25519
""")
deployer.succeed("""
set -e
cd /etc/ssl/certs
{ cat ca-bundle.crt
cat ca-certificates.crt
cat pve-root-ca.pem
} > new-ca-bundle.crt
rm ca-bundle.crt ca-certificates.crt
mv new-ca-bundle.crt ca-bundle.crt
ln -s ca-bundle.crt ca-certificates.crt
openssl verify -CApath /etc/ssl/certs ./pve-root-ca.pem
""")
with subtest("Deploy the template"):
template_id = deployer.succeed(f"""
ssh -o BatchMode=yes -o StrictHostKeyChecking=no pve "true"
export PROXMOX_VE_INSECURE="true"
export SSL_CERT_FILE=/tmp/pve-ca-bundle.crt
export PROXMOX_VE_API_TOKEN="root@pam!template={template_token}"
${lib.getExe template-deployment.run} | jq -r '.id.value'
""").strip()
deploy = f"""
set -e
ssh -o BatchMode=yes -o StrictHostKeyChecking=no pve "true"
export PROXMOX_VE_INSECURE="true"
export SSL_CERT_FILE=/tmp/pve-ca-bundle.crt
export PROXMOX_VE_API_TOKEN="root@pam!vm={vm_token}"
export TF_VAR_template_id="{template_id}"
${lib.getExe vm-deployment.run} | jq -r '.ipv4.value[0]'
"""
with subtest("Run the deployment"):
ip = deployer.succeed(deploy).strip()
with subtest("Verify package"):
deployer.succeed(f"""
ssh -i "/root/.ssh/id_ed25519" -o StrictHostKeyChecking=no -o BatchMode=yes -J pve root@{ip} su - operator -c hello >&2
""")
with subtest("No-op update"):
deployer.succeed(deploy, timeout=120)
'';
}

View file

@ -0,0 +1,63 @@
{
config,
system,
sources ? import ../../../../npins,
...
}:
let
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../../common/utils.nix { inherit sources; }) mkNixosConfiguration;
inherit (config)
nodeName
targetSystem
httpBackend
node-name
imageDatastoreId
;
in
(pkgs.callPackage ../../../utils.nix { }).evalModel (
{ config, ... }:
{
imports = [ ../../common/model.nix ];
config = {
environments.default = environment: {
resources."operator-environment".login-shell = {
wheel = true;
username = "operator";
};
implementation =
{
required-resources,
...
}:
{
tf-proxmox-template = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
host = nodeName;
};
inherit
node-name
httpBackend
imageDatastoreId
;
};
};
};
};
options.default =
let
env = config.environments.default;
in
lib.mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "default";
configuration = config."example-configuration";
};
};
}
)

View file

@ -0,0 +1,90 @@
{
config,
system,
sources ? import ../../../../npins,
...
}@args:
let
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../../common/utils.nix { inherit sources; }) mkNixosConfiguration;
inherit (config)
nodeName
pathToRoot
targetSystem
sshOpts
httpBackend
key-file
node-name
bridge
vlanId
templateId
imageDatastoreId
vmDatastoreId
cdDatastoreId
ipv4Gateway
ipv4Address
ipv6Gateway
ipv6Address
;
in
(pkgs.callPackage ../../../utils.nix { }).evalModel (
{ config, ... }:
{
imports = [ ../../common/model.nix ];
config = {
environments.default = environment: {
resources."operator-environment".login-shell = {
wheel = true;
username = "operator";
};
implementation =
{
required-resources,
deployment-name,
}:
{
tf-proxmox-vm = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
username = "root";
host = nodeName;
inherit key-file sshOpts;
};
caller = "deployment/check/data-model-tf-proxmox/setups/vm.nix";
inherit
args
deployment-name
httpBackend
node-name
bridge
vlanId
templateId
imageDatastoreId
vmDatastoreId
cdDatastoreId
ipv4Gateway
ipv4Address
ipv6Gateway
ipv6Address
;
root-path = pathToRoot;
};
};
};
};
options.default =
let
env = config.environments.default;
in
lib.mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "default";
configuration = config."example-configuration";
};
};
}
)

View file

@ -5,7 +5,6 @@
...
}@args:
let
self = "deployment/check/data-model-tf/data-model.nix";
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
@ -40,7 +39,7 @@ in
key-file = null;
inherit sshOpts;
};
module = self;
caller = "deployment/check/data-model-tf/data-model.nix";
inherit args deployment-name httpBackend;
root-path = pathToRoot;
};

View file

@ -28,10 +28,6 @@ let
in
{
_class = "nixosTest";
imports = [
./options.nix
];
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-single-host/run.sh

View file

@ -1,25 +0,0 @@
{
lib,
...
}:
let
inherit (lib) mkOption types;
in
{
options = {
targetSystem = mkOption {
type = types.str;
description = "name of the host to deploy to";
};
sshOpts = mkOption {
description = "Extra SSH options (`-o`) to use.";
type = types.listOf types.str;
default = [ ];
example = "ConnectTimeout=60";
};
httpBackend = mkOption {
description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
type = types.attrsOf (types.either types.str types.int);
};
};
}

View file

@ -1,5 +1,4 @@
{
inputs,
lib,
hostPkgs,
config,
@ -151,17 +150,6 @@ in
(import ../../../panel { }).module
];
## FIXME: This should be in the common stuff.
security.acme = {
acceptTerms = true;
defaults.email = "test@test.com";
defaults.server = "https://acme.test/dir";
};
security.pki.certificateFiles = [
(import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
];
networking.extraHosts = "${config.acmeNodeIP} acme.test";
services.panel = {
enable = true;
production = true;

View file

@ -15,7 +15,7 @@ in
{
name = "proxmox-basic";
nodes.mypve =
nodes.pve =
{ sources, ... }:
{
imports = [
@ -44,41 +44,41 @@ in
};
testScript = ''
machine.start()
machine.wait_for_unit("pveproxy.service")
assert "running" in machine.succeed("pveproxy status")
pve.start()
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
# Copy Iso
machine.succeed("mkdir -p /var/lib/vz/template/iso/")
machine.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
pve.succeed("mkdir -p /var/lib/vz/template/iso/")
pve.succeed("cp ${minimalIso} /var/lib/vz/template/iso/minimal.iso")
# Declarative VM creation
machine.wait_for_unit("multi-user.target")
machine.succeed("qm stop 100 --timeout 0")
pve.wait_for_unit("multi-user.target")
pve.succeed("qm stop 100 --timeout 0")
# Seabios VM creation
machine.succeed(
pve.succeed(
"qm create 101 --kvm 0 --bios seabios -cdrom local:iso/minimal.iso",
"qm start 101",
"qm stop 101 --timeout 0"
)
# Legacy ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 102 --kvm 0 --bios ovmf -cdrom local:iso/minimal.iso",
"qm start 102",
"qm stop 102 --timeout 0"
)
# UEFI ovmf vm creation
machine.succeed(
pve.succeed(
"qm create 103 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m -cdrom local:iso/minimal.iso",
"qm start 103",
"qm stop 103 --timeout 0"
)
# UEFI ovmf vm creation with secure boot
machine.succeed(
pve.succeed(
"qm create 104 --kvm 0 --bios ovmf --efidisk0 local:4,efitype=4m,pre-enrolled-keys=1 -cdrom local:iso/minimal.iso",
"qm start 104",
"qm stop 104 --timeout 0"

View file

@ -3,6 +3,7 @@
lib,
config,
inputs,
sources ? import ../npins,
...
}:
let
@ -30,13 +31,13 @@ let
writeConfig =
{
system,
module,
caller,
root-path,
deployment-type,
deployment-name,
args,
}:
# having a `module` location and (serializable) `args`, we know
# having a `caller` location and (serializable) `args`, we know
# enough to call it again to extract different info elsewhere later.
# we use this to make a deployment script using the desired nixos config,
# which would otherwise not be serializable, while nix also makes it hard to
@ -46,7 +47,7 @@ let
pkgs.writers.writeText "configuration.nix" ''
import ${root-path}/deployment/nixos.nix {
system = "${system}";
configuration = (import "${root-path}/${module}" (builtins.fromJSON "${
configuration = (import "${root-path}/${caller}" (builtins.fromJSON "${
lib.replaceStrings [ "\"" ] [ "\\\"" ] (lib.strings.toJSON args)
}")).${deployment-name}.${deployment-type}.nixos-configuration;
}
@ -121,8 +122,8 @@ let
};
inherit nixos-configuration;
ssh = host-ssh;
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
caller = mkOption {
description = "The calling module to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
@ -147,7 +148,7 @@ let
inherit (ssh-host.config)
system
ssh
module
caller
args
deployment-name
root-path
@ -168,7 +169,7 @@ let
nixos_conf = writeConfig {
inherit
system
module
caller
args
deployment-name
root-path
@ -204,8 +205,8 @@ let
};
inherit nixos-configuration;
ssh = host-ssh;
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
caller = mkOption {
description = "The calling module to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
@ -233,7 +234,7 @@ let
inherit (tf-host.config)
system
ssh
module
caller
args
deployment-name
root-path
@ -255,7 +256,7 @@ let
nixos_conf = writeConfig {
inherit
system
module
caller
args
deployment-name
root-path
@ -283,6 +284,322 @@ let
};
});
};
# Deployment flavor that builds a NixOS disk image (via disko), converts it
# to qcow2, and uploads it to a Proxmox VE datastore through Terraform.
tf-proxmox-template = mkOption {
  description = ''
    A Terraform deployment to upload a virtual machine template to ProxmoX VE.
    Proxmox credentials should be set using [environment variables]
    (https://registry.terraform.io/providers/bpg/proxmox/latest/docs#environment-variables-summary)
    with role `PVEDatastoreAdmin`.
  '';
  type = submodule (tf-host: {
    options = {
      system = mkOption {
        description = "The architecture of the system to deploy to.";
        type = types.str;
      };
      inherit nixos-configuration;
      ssh = host-ssh;
      node-name = mkOption {
        description = "the name of the ProxmoX node to use.";
        type = types.str;
      };
      httpBackend = mkOption {
        description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
        type = types.attrsOf (types.either types.str types.int);
      };
      imageDatastoreId = mkOption {
        description = "ID of the datastore of the image.";
        type = types.str;
        default = "local";
      };
      # Derived deploy script: builds the image, converts raw -> qcow2, then
      # hands the result to deployment/run/tf-proxmox-template/run.sh.
      run = mkOption {
        type = types.package;
        # error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
        # readOnly = true;
        default =
          let
            inherit (tf-host.config)
              system
              ssh
              httpBackend
              node-name
              imageDatastoreId
              ;
            inherit (ssh)
              host
              ;
            # machine = import nixos_conf;
            # Evaluate the NixOS configuration to obtain the built disk image.
            machine = import ./nixos.nix {
              inherit sources system;
              configuration = tf-host.config.nixos-configuration;
              # configuration = { ... }: {
              #   imports = [
              #     tf-host.config.nixos-configuration
              #     ../infra/common/nixos/repart.nix
              #   ];
              # };
            };
            # inherit (machine.config.boot.uki) name;
            # NOTE(review): hard-coded image base name; appears to mirror
            # `boot.uki.name = "monkey"` in infra/common/nixos/repart.nix — confirm
            # these stay in sync (or derive it as the commented inherit suggests).
            name = "monkey";
            # # systemd-repart
            # better for cross-compilation, worse for pre-/post-processing, doesn't support MBR: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
            # raw = "${machine.config.system.build.image}/${name}.raw";
            # disko
            # worse for cross-compilation, better for pre-/post-processing, needs manual `imageSize`, random failures: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
            raw = "${machine.config.system.build.diskoImages}/main.raw";
            # # nixos-generators: note it can straight-up do qcow2 as well, if we settle for nixos-generators
            # # `mount: /run/nixos-etc-metadata.J3iARWBtna: failed to setup loop device for /nix/store/14ka2bmx6lcnyr8ah2yl787sqcgxz5ni-etc-metadata.erofs.`
            # # [`Error: Failed to parse os-release`](https://github.com/NixOS/nixpkgs/blob/5b1861820a3bc4ef2f60b0afcffb71ea43f5d000/pkgs/by-name/sw/switch-to-configuration-ng/src/src/main.rs#L151)
            # raw = let
            #   # TODO parameterize things to let this flow into the terraform
            #   # btw qcow can be made by nixos-generators (qcow, qcow-efi) or by `image.repart`
            #   # wait, so i generate an image for the nixos config from the data model? how would i then propagate that to deploy?
            #   gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
            #     inherit system formatConfig;
            #     inherit (sources) nixpkgs;
            #     configuration = tf-host.config.nixos-configuration;
            #   };
            # in
            # "${gen.config.system.build.${formatAttr}}/nixos${fileExtension}";
            # Exported to Terraform as TF_VAR_* variables (see the deploy script below).
            environment = {
              inherit
                host
                ;
              node_name = node-name;
              image_datastore_id = imageDatastoreId;
            };
            # Pre-initialized Terraform working directory with the HTTP back-end configured.
            tf-env = pkgs.callPackage ./run/tf-env.nix {
              inherit httpBackend;
              tfPackage = pkgs.callPackage ./run/tf-proxmox-template/tf.nix { };
              tfDirs = [
                "deployment/run/tf-proxmox-template"
              ];
            };
          in
          lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-template.sh"
            # NOTE(review): the PATH here pulls in ./run/tf-proxmox-vm/tf.nix while
            # tf-env above uses ./run/tf-proxmox-template/tf.nix — looks like a
            # copy-paste from the vm flavor; confirm which tofu build is intended.
            (withPackages [
              pkgs.jq
              pkgs.qemu
              pkgs.nixos-generators
              pkgs.httpie
              (pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
            ])
            ''
              set -e
              # nixos-generate gives the burden of building revisions, while systemd-repart handles partitioning ~~at the burden of version revisions~~
              # .qcow2 is around half the size of .raw, on top of supporting backups - be it apparently at the cost of performance
              qemu-img convert -f raw -O qcow2 -C "${raw}" /tmp/${name}.qcow2
              ls -l ${raw} >&2
              ls -l /tmp/${name}.qcow2 >&2
              env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
                ${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
                TF_VAR_image=/tmp/${name}.qcow2 \
                tf_env=${tf-env} bash ./deployment/run/tf-proxmox-template/run.sh
            '';
      };
    };
  });
};
# Deployment flavor that clones a VM from an uploaded template on Proxmox VE
# and then applies the NixOS configuration to it (via tf-single-host).
tf-proxmox-vm = mkOption {
  description = ''
    A Terraform deployment to provision and update a virtual machine on ProxmoX VE.
    Proxmox credentials should be set using [environment variables]
    (https://registry.terraform.io/providers/bpg/proxmox/latest/docs#environment-variables-summary)
    with roles `PVEVMAdmin PVEDatastoreAdmin PVESDNUser`.
  '';
  type = submodule (tf-host: {
    options = {
      system = mkOption {
        description = "The architecture of the system to deploy to.";
        type = types.str;
      };
      inherit nixos-configuration;
      ssh = host-ssh;
      caller = mkOption {
        description = "The calling module to obtain the NixOS configuration from.";
        type = types.str;
      };
      args = mkOption {
        description = "The arguments with which to call the module to obtain the NixOS configuration.";
        type = types.attrs;
      };
      deployment-name = mkOption {
        description = "The name of the deployment for which to obtain the NixOS configuration.";
        type = types.str;
      };
      root-path = mkOption {
        description = "The path to the root of the repository.";
        type = types.path;
      };
      node-name = mkOption {
        description = "the name of the ProxmoX node to use.";
        type = types.str;
      };
      httpBackend = mkOption {
        description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
        type = types.attrsOf (types.either types.str types.int);
      };
      bridge = mkOption {
        description = "The name of the network bridge (defaults to vmbr0).";
        type = types.str;
        default = "vmbr0";
      };
      vlanId = mkOption {
        description = "The VLAN identifier.";
        type = types.int;
        default = 0;
      };
      imageDatastoreId = mkOption {
        description = "ID of the datastore of the image.";
        type = types.str;
        default = "local";
      };
      templateId = mkOption {
        description = "ID of the template file from which to clone the VM.";
        type = types.nullOr types.str;
        example = "local:import/template.qcow2";
      };
      vmDatastoreId = mkOption {
        description = "ID of the datastore of the VM.";
        type = types.str;
        default = "local";
      };
      cdDatastoreId = mkOption {
        description = "ID of the datastore of the virtual CD-rom drive to use for cloud-init.";
        type = types.str;
        default = "local";
      };
      ipv4Gateway = mkOption {
        description = "Gateway for IPv4.";
        type = types.str;
        default = "";
      };
      ipv4Address = mkOption {
        description = "IPv4 address.";
        type = types.str;
        default = "";
      };
      ipv6Gateway = mkOption {
        description = "Gateway for IPv6.";
        type = types.str;
        default = "";
      };
      ipv6Address = mkOption {
        description = "IPv6 address.";
        type = types.str;
        default = "";
      };
      # Derived deploy script wrapping deployment/run/tf-proxmox-vm/run.sh.
      run = mkOption {
        type = types.package;
        # error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
        # readOnly = true;
        default =
          let
            inherit (tf-host.config)
              system
              ssh
              caller
              args
              deployment-name
              httpBackend
              root-path
              node-name
              bridge
              vlanId
              imageDatastoreId
              templateId
              vmDatastoreId
              cdDatastoreId
              ipv4Gateway
              ipv4Address
              ipv6Gateway
              ipv6Address
              ;
            inherit (ssh)
              host
              username
              key-file
              sshOpts
              ;
            deployment-type = "tf-proxmox-vm";
            # Serialized pointer to the NixOS configuration: caller + args are
            # re-imported on the far side to reconstruct it (see writeConfig).
            nixos_conf = writeConfig {
              inherit
                system
                caller
                args
                deployment-name
                root-path
                deployment-type
                ;
            };
            # Exported to Terraform as TF_VAR_* variables; nulls are filtered
            # out below so optional values (e.g. templateId) fall back to TF defaults.
            environment = {
              key_file = key-file;
              ssh_opts = sshOpts;
              inherit
                host
                nixos_conf
                bridge
                ;
              node_name = node-name;
              ssh_user = username;
              vlan_id = vlanId;
              image_datastore_id = imageDatastoreId;
              template_id = templateId;
              vm_datastore_id = vmDatastoreId;
              cd_datastore_id = cdDatastoreId;
              ipv4_gateway = ipv4Gateway;
              ipv4_address = ipv4Address;
              ipv6_gateway = ipv6Gateway;
              ipv6_address = ipv6Address;
            };
            # Pre-initialized Terraform working directory with the HTTP back-end configured.
            tf-env = pkgs.callPackage ./run/tf-env.nix {
              inherit httpBackend;
              tfPackage = pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { };
              tfDirs = [
                "deployment/run/tf-single-host"
                "deployment/run/tf-proxmox-vm"
              ];
            };
            # NOTE(review): hard-coded name, referenced only in the commented-out
            # host-key TODO below — presumably a leftover test value; confirm.
            vm_name = "test14";
          in
          lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-vm.sh"
            (withPackages [
              pkgs.jq
              pkgs.qemu
              pkgs.nixos-generators
              pkgs.httpie
              (pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
            ])
            ''
              set -e
              # TODO after install: $nix_host_keys
              # cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
              # chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
              # cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
              # chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
              env ${
                toString (
                  lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") (
                    lib.filterAttrs (_: v: v != null) environment
                  )
                )
              } \
                ${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
                tf_env=${tf-env} bash ./deployment/run/tf-proxmox-vm/run.sh
            '';
      };
    };
  });
};
};
in
{

View file

@ -40,6 +40,10 @@
deployment-model-tf = import ./check/data-model-tf {
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./check/data-model-tf-proxmox {
inherit inputs sources system;
};
};
};
}

View file

@ -1,9 +1,10 @@
{
configuration,
system,
sources ? import ../npins,
...
}:
let
sources = import ../npins;
eval = import "${sources.nixpkgs}/nixos/lib/eval-config.nix" {
inherit system;
specialArgs = {

View file

@ -1,23 +1,33 @@
#! /usr/bin/env bash
set -xeuo pipefail
declare username host key_file ssh_opts nixos_conf
IFS=" " read -r -a ssh_opts <<< "$( (echo "$ssh_opts" | jq -r '@sh') | tr -d \'\")"
readarray -t ssh_opts < <(echo "$ssh_opts" | jq -r '.[]')
# DEPLOY
sshOpts=(
sshOptsInit=(
-o BatchMode=yes
-o StrictHostKeyChecking=no
)
for ssh_opt in "${ssh_opts[@]}"; do
sshOpts+=(
-o "$ssh_opt"
)
done
if [[ -n "$key_file" ]]; then
sshOpts+=(
sshOptsInit+=(
-i "$key_file"
)
fi
# [@] will quote variables containing spaces itself
sshOptsAt=("${sshOptsInit[@]}")
for ssh_opt in "${ssh_opts[@]}"; do
sshOptsAt+=(
-o "${ssh_opt}"
)
done
# [*] needs manual quoting
sshOptsAsterisk=("${sshOptsInit[@]}")
for ssh_opt in "${ssh_opts[@]}"; do
sshOptsAsterisk+=(
-o "\"${ssh_opt}\""
)
done
destination="$username@$host"
command=(nix-instantiate --show-trace "${nixos_conf}")
@ -32,9 +42,9 @@ command=(nix-instantiate --show-trace "${nixos_conf}")
# FIXME explore import/readFile as ways to instantiate the derivation, potentially allowing to realize the store path up-front from Nix?
outPath=$(nix-store --realize "$("${command[@]}" -A config.system.build.toplevel.drvPath --eval --strict --json | jq -r '.')")
# deploy the config by nix-copy-closure
NIX_SSHOPTS="${sshOpts[*]}" nix-copy-closure --to "$destination" "$outPath" --gzip --use-substitutes
NIX_SSHOPTS="${sshOptsAsterisk[*]}" nix-copy-closure --to "$destination" "$outPath" --gzip --use-substitutes
# switch the remote host to the config
# shellcheck disable=SC2029
ssh "${sshOpts[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath"
ssh "${sshOptsAt[@]}" "$destination" "nix-env --profile /nix/var/nix/profiles/system --set $outPath"
# shellcheck disable=SC2029
ssh -o "ConnectTimeout=1" -o "ServerAliveInterval=1" "${sshOpts[@]}" "$destination" "nohup $outPath/bin/switch-to-configuration switch &" 2>&1
ssh -o "ConnectTimeout=5" -o "ServerAliveInterval=1" "${sshOptsAt[@]}" "$destination" "nohup env NIXOS_INSTALL_BOOTLOADER=0 $outPath/bin/switch-to-configuration switch &" 2>&1

View file

@ -0,0 +1,66 @@
# Uploads a qcow2 disk image to a Proxmox VE datastore as an importable
# "template" file. State lives in an HTTP back-end configured via environment
# variables; provider credentials likewise come from the environment.
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
      version = "= 0.81.0"
    }
  }
  backend "http" {
  }
}
locals {
  # Name under which the image lands on the datastore, keyed by category
  # so e.g. test and production images do not overwrite one another.
  dump_name = "qemu-nixos-fediversity-${var.category}.qcow2"
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
  endpoint = "https://${var.host}:8006/"
  # used for upload
  ssh {
    agent = true
    username = "root"
  }
}
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
# FIXME (un)stream
# FIXME handle known-hosts in TF state
# FIXME move to host
# FIXME switch to base image shared between jobs as upload seems a bottleneck? e.g. by:
# - recursive TF
# - hash in name over overwrite
# won't notice file changes: https://github.com/bpg/terraform-provider-proxmox/issues/677
resource "proxmox_virtual_environment_file" "upload" {
  depends_on = [
    data.external.hash,
  ]
  content_type = "import"
  # https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
  # PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
  # that UI action also adds it in `/etc/pve/storage.cfg`
  datastore_id = var.image_datastore_id
  node_name = var.node_name
  # overwrite rather than version: see the FIXMEs above on sharing base images
  overwrite = true
  # seconds; large images over slow links may need more — TODO confirm headroom
  timeout_upload = 500
  source_file {
    path = var.image
    file_name = local.dump_name
    # FIXME compute and pass hash (so identical builds don't trigger drift)
    # checksum = "sha256"
  }
}
output "id" {
  value = proxmox_virtual_environment_file.upload.id
}
output "path" {
  value = proxmox_virtual_environment_file.upload.source_file[0].file_name
}

View file

@ -0,0 +1,7 @@
#! /usr/bin/env bash
# Apply the tf-proxmox-template Terraform configuration and print its
# outputs as JSON on stdout (apply logs go to stderr).
set -euo pipefail
declare tf_env # pre-initialized Terraform environment, provided by the caller
workdir="${tf_env}/deployment/run/tf-proxmox-template"
cd "${workdir}"
# -parallelism=1 keeps memory usage down during apply
tofu apply --auto-approve -input=false -parallelism=1 >&2
tofu output -json

View file

@ -0,0 +1,48 @@
# FIXME: use overlays so this gets imported just once?
# Builds a pinned OpenTofu 1.9.0 (patched so `init` works inside the Nix
# sandbox) bundled with the `external` provider and bpg/proxmox 0.81.0.
{
  pkgs,
}:
# FIXME centralize overlays
# XXX using recent revision for https://github.com/NixOS/nixpkgs/pull/447849
let
  sources = import ../../../npins;
  # Fetch provider sources from npins pins instead of the network,
  # keeping the build reproducible.
  mkProvider =
    args:
    pkgs.terraform-providers.mkProvider (
      { mkProviderFetcher = { repo, ... }: sources.${repo}; } // args
    );
in
(
  (pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/op/opentofu/package.nix" { })
  .overrideAttrs
    (old: rec {
      patches = (old.patches or [ ]) ++ [
        # TF with back-end poses a problem for nix: initialization involves both
        # mutation (nix: only inside build) and a network call (nix: not inside build)
        ../../check/data-model-tf/02-opentofu-sandboxed-init.patch
      ];
      # versions > 1.9.0 need go 1.24+
      version = "1.9.0";
      src = pkgs.fetchFromGitHub {
        owner = "opentofu";
        repo = "opentofu";
        tag = "v${version}";
        hash = "sha256-e0ZzbQdex0DD7Bj9WpcVI5roh0cMbJuNr5nsSVaOSu4=";
      };
      vendorHash = "sha256-fMTbLSeW+pw6GK8/JLZzG2ER90ss2g1FSDX5+f292do=";
    })
).withPlugins
  (p: [
    p.external
    # NOTE(review): near-duplicate of run/tf-proxmox-vm/tf.nix (which also adds
    # p.null) — candidates for the centralization the FIXMEs above ask for.
    (mkProvider {
      owner = "bpg";
      repo = "terraform-provider-proxmox";
      # 0.82+ need go 1.25
      rev = "v0.81.0";
      spdx = "MPL-2.0";
      # source comes from the npins fetcher above, so no fetch hash is needed
      hash = null;
      vendorHash = "sha256-cpei22LkKqohlE76CQcIL5d7p+BjNcD6UQ8dl0WXUOc=";
      homepage = "https://registry.terraform.io/providers/bpg/proxmox";
      provider-source-address = "registry.opentofu.org/bpg/proxmox";
    })
  ])

View file

@ -0,0 +1,26 @@
# Input variables for the tf-proxmox-template deployment (image upload).
variable "host" {
  description = "the host of the ProxmoX Virtual Environment."
  type = string
}
variable "node_name" {
  description = "the name of the ProxmoX node to use."
  type = string
}
# NOTE(review): despite the description, main.tf uploads this as an importable
# VM disk image (qcow2), not a back-up — confirm and reword the description.
variable "image" {
  description = "Back-up file to upload."
  type = string
}
variable "image_datastore_id" {
  description = "ID of the datastore of the image."
  type = string
  default = "local"
}
# Used in the uploaded file's name (see locals.dump_name in main.tf).
variable "category" {
  type = string
  description = "Category to be used in naming the base image."
  default = "test"
}

View file

@ -0,0 +1,31 @@
#! /usr/bin/env bash
# Poll a freshly provisioned host over SSH until it accepts a connection;
# exit 0 on the first success, exit 1 after 30 failed attempts (5s apart).
set -euo pipefail
declare username host key_file ssh_opts # supplied via the environment by the caller
# ssh_opts arrives as a JSON array of extra `-o` option strings
readarray -t ssh_opts < <(echo "$ssh_opts" | jq -r '.[]')
sshOpts=(
  -o BatchMode=yes
  -o StrictHostKeyChecking=no
  -o ConnectTimeout=5
  -o ServerAliveInterval=5
)
if [[ -n "${key_file}" ]]; then
  # only pass an identity file when one was configured
  sshOpts+=(-i "${key_file}")
fi
for ssh_opt in "${ssh_opts[@]}"; do
  sshOpts+=(-o "${ssh_opt}")
done
for ((i = 1; i <= 30; i++)); do
  if ssh "${sshOpts[@]}" "${username}@${host}" "true"; then
    exit 0
  fi
  echo "Waiting for SSH (attempt #$i)..."
  sleep 5
done
echo "SSH never came up!" >&2
exit 1

View file

@ -0,0 +1,145 @@
# Provisions a Proxmox VE virtual machine cloned from a previously uploaded
# template image, waits for SSH, then deploys the NixOS configuration onto it
# via the tf-single-host module. State lives in an HTTP back-end.
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
      version = "= 0.81.0"
    }
  }
  backend "http" {
  }
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
  endpoint = "https://${var.host}:8006/"
  # used only for files and creating custom disks
  ssh {
    agent = true
    # uncomment and configure if using api_token instead of password
    username = "root"
    # node {
    #   name = "${var.node_name}"
    #   address = "${var.host}"
    #   # port = 22
    # }
  }
}
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
resource "proxmox_virtual_environment_vm" "nix_vm" {
  lifecycle {
    # wait, would this not disseminate any changes to this property,
    # or just defer syncing when only this changed?
    ignore_changes = [
      disk["import_from"],
      initialization,
    ]
  }
  node_name = var.node_name
  pool_id = var.pool_id
  description = var.description
  started = true
  # https://wiki.nixos.org/wiki/Virt-manager#Guest_Agent
  agent {
    enabled = true
    timeout = "2m"
    trim = true
  }
  cpu {
    type = "x86-64-v2-AES"
    cores = var.cores
    sockets = var.sockets
    numa = true
  }
  memory {
    dedicated = var.memory
  }
  # Main disk, populated from the uploaded template image.
  disk {
    datastore_id = var.vm_datastore_id
    file_format = "qcow2"
    interface = "scsi0"
    discard = "on"
    iothread = true
    size = var.disk_size
    ssd = true
    backup = false
    cache = "none"
    import_from = var.template_id
  }
  efi_disk {
    datastore_id = var.vm_datastore_id
    file_format = "qcow2"
    type = "4m"
  }
  network_device {
    model = "virtio"
    bridge = var.bridge
    vlan_id = var.vlan_id
  }
  operating_system {
    type = "l26"
  }
  scsi_hardware = "virtio-scsi-single"
  bios = "ovmf"
  # cloud-init style network configuration, delivered on a virtual CD drive
  initialization {
    datastore_id = var.cd_datastore_id
    interface = "sata2"
    ip_config {
      ipv4 {
        gateway = var.ipv4_gateway
        address = var.ipv4_address
      }
      ipv6 {
        gateway = var.ipv6_gateway
        address = var.ipv6_address
      }
    }
  }
}
# Block until the new VM answers on SSH before attempting the NixOS deploy.
# NOTE(review): ipv4_addresses[1][0] assumes interface index 1 is the first
# non-loopback NIC — confirm against the provider's reported ordering.
resource "null_resource" "await_ssh" {
  depends_on = [
    proxmox_virtual_environment_vm.nix_vm
  ]
  provisioner "local-exec" {
    command = "env username='root' host='${proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1][0]}' key_file=${var.key_file} ssh_opts='${var.ssh_opts}' bash ./await-ssh.sh"
  }
}
module "nixos-rebuild" {
  depends_on = [
    data.external.hash,
    null_resource.await_ssh,
  ]
  source = "../tf-single-host"
  nixos_conf = var.nixos_conf
  username = "root"
  host = proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1][0]
  key_file = var.key_file
  ssh_opts = var.ssh_opts
}
output "id" {
  value = proxmox_virtual_environment_vm.nix_vm.vm_id
}
output "ipv4" {
  value = proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[1]
}
# IPv6 addresses are suffixed with their interface name as scope (%<iface>).
output "ipv6" {
  value = [ for elem in proxmox_virtual_environment_vm.nix_vm.ipv6_addresses[1] : "${elem}%${proxmox_virtual_environment_vm.nix_vm.network_interface_names[1]}" ]
}

View file

@ -0,0 +1,8 @@
#! /usr/bin/env bash
# Apply the tf-proxmox-vm Terraform configuration and print its outputs
# as JSON on stdout (apply logs go to stderr).
set -euo pipefail
declare tf_env # pre-initialized Terraform environment, provided by the caller
workdir="${tf_env}/deployment/run/tf-proxmox-vm"
cd "${workdir}"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -input=false -parallelism=1 >&2
tofu output -json

View file

@ -0,0 +1,49 @@
# FIXME: use overlays so this gets imported just once?
# Builds a pinned OpenTofu 1.9.0 (patched so `init` works inside the Nix
# sandbox) bundled with the `external` and `null` providers plus bpg/proxmox.
{
  pkgs,
}:
# FIXME centralize overlays
# XXX using recent revision for https://github.com/NixOS/nixpkgs/pull/447849
let
  sources = import ../../../npins;
  # Fetch provider sources from npins pins instead of the network,
  # keeping the build reproducible.
  mkProvider =
    args:
    pkgs.terraform-providers.mkProvider (
      { mkProviderFetcher = { repo, ... }: sources.${repo}; } // args
    );
in
(
  (pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/op/opentofu/package.nix" { })
  .overrideAttrs
    (old: rec {
      patches = (old.patches or [ ]) ++ [
        # TF with back-end poses a problem for nix: initialization involves both
        # mutation (nix: only inside build) and a network call (nix: not inside build)
        ../../check/data-model-tf/02-opentofu-sandboxed-init.patch
      ];
      # versions > 1.9.0 need go 1.24+
      version = "1.9.0";
      src = pkgs.fetchFromGitHub {
        owner = "opentofu";
        repo = "opentofu";
        tag = "v${version}";
        hash = "sha256-e0ZzbQdex0DD7Bj9WpcVI5roh0cMbJuNr5nsSVaOSu4=";
      };
      vendorHash = "sha256-fMTbLSeW+pw6GK8/JLZzG2ER90ss2g1FSDX5+f292do=";
    })
).withPlugins
  (p: [
    p.external
    # null provider is needed for the await_ssh null_resource in main.tf
    p.null
    # NOTE(review): near-duplicate of run/tf-proxmox-template/tf.nix —
    # candidates for the centralization the FIXMEs above ask for.
    (mkProvider {
      owner = "bpg";
      repo = "terraform-provider-proxmox";
      # 0.82+ need go 1.25
      rev = "v0.81.0";
      spdx = "MPL-2.0";
      # source comes from the npins fetcher above, so no fetch hash is needed
      hash = null;
      vendorHash = "sha256-cpei22LkKqohlE76CQcIL5d7p+BjNcD6UQ8dl0WXUOc=";
      homepage = "https://registry.terraform.io/providers/bpg/proxmox";
      provider-source-address = "registry.opentofu.org/bpg/proxmox";
    })
  ])

View file

@ -0,0 +1,119 @@
# Input variables for the tf-proxmox-vm deployment (VM provisioning + NixOS deploy).
variable "nixos_conf" {
  description = "The path to the NixOS configuration to deploy."
  type = string
}
variable "ssh_user" {
  description = "the SSH user to use"
  type = string
  default = "root"
}
variable "host" {
  description = "the host of the ProxmoX Virtual Environment."
  type = string
}
variable "node_name" {
  description = "the name of the ProxmoX node to use."
  type = string
}
variable "key_file" {
  description = "path to the user's SSH private key"
  type = string
}
# A JSON-encoded list passed as a string (decoded by jq in await-ssh.sh).
variable "ssh_opts" {
  description = "Extra SSH options (`-o`) to use."
  type = string
  default = "[]"
}
variable "bridge" {
  description = "The name of the network bridge (defaults to vmbr0)."
  type = string
  default = "vmbr0"
}
variable "vlan_id" {
  description = "The VLAN identifier."
  type = number
  default = 0
}
variable "template_id" {
  description = "ID of the template file from which to clone the VM."
  type = string
}
variable "vm_datastore_id" {
  description = "ID of the datastore of the VM."
  type = string
  default = "local"
}
variable "cd_datastore_id" {
  description = "ID of the datastore of the virtual CD-rom drive to use for cloud-init."
  type = string
  default = "local"
}
variable "ipv4_gateway" {
  description = "Gateway for IPv4."
  type = string
  default = ""
}
variable "ipv4_address" {
  description = "IPv4 address."
  type = string
  default = ""
}
variable "ipv6_gateway" {
  description = "Gateway for IPv6."
  type = string
  default = ""
}
variable "ipv6_address" {
  description = "IPv6 address."
  type = string
  default = ""
}
# Free-form VM description shown in the Proxmox UI.
variable "description" {
  type = string
  default = ""
}
variable "sockets" {
  type = number
  description = "The number of sockets of the VM."
  default = 1
}
variable "cores" {
  type = number
  description = "The number of cores of the VM."
  default = 1
}
variable "memory" {
  type = number
  description = "The amount of memory of the VM in MiB."
  default = 2048
}
variable "disk_size" {
  type = number
  description = "The amount of disk of the VM in GiB."
  default = 32
}
variable "pool_id" {
  type = string
  description = "The identifier for a pool to assign the virtual machine to."
  default = "Fediversity"
}

View file

@ -1,9 +1,7 @@
#! /usr/bin/env bash
set -xeuo pipefail
set -euo pipefail
declare tf_env
export TF_LOG=info
cd "${tf_env}/deployment/run/tf-single-host"
# parallelism=1: limit OOM risk
tofu apply --auto-approve -parallelism=1

View file

@ -10,6 +10,7 @@ in
imports = [
./networking.nix
./users.nix
./repart.nix
];
time.timeZone = "Europe/Amsterdam";

View file

@ -0,0 +1,192 @@
# Builds a bootable NixOS disk image via systemd-repart (image/repart.nix):
# a UKI-booting ESP plus an ext4 root holding the system's store paths.
# Commented-out sections are alternatives kept for the ongoing experiments
# referenced below (tmpfs root, split store, persistent /var).
{
  config,
  pkgs,
  lib,
  modulesPath,
  ...
}:
{
  imports = [
    "${modulesPath}/image/repart.nix"
  ];
  # Mount points are derived from the repart partition definitions below,
  # so device UUIDs and fs types stay in one place.
  fileSystems = {
    # "/" = {
    #   fsType = "tmpfs";
    #   options = [
    #     "size=20%"
    #   ];
    # };
    "/" =
      let
        partConf = config.image.repart.partitions."root".repartConfig;
      in
      {
        device = "/dev/disk/by-partuuid/${partConf.UUID}";
        fsType = partConf.Format;
      };
    # "/var" =
    #   let
    #     partConf = config.image.repart.partitions."var".repartConfig;
    #   in
    #   {
    #     device = "/dev/disk/by-partuuid/${partConf.UUID}";
    #     fsType = partConf.Format;
    #   };
    "/boot" =
      let
        partConf = config.image.repart.partitions."esp".repartConfig;
      in
      {
        device = "/dev/disk/by-partuuid/${partConf.UUID}";
        fsType = partConf.Format;
      };
    # "/nix/store" =
    #   let
    #     partConf = config.image.repart.partitions."store".repartConfig;
    #   in
    #   {
    #     device = "/dev/disk/by-partlabel/${partConf.Label}";
    #     fsType = partConf.Format;
    #   };
  };
  # NOTE(review): this name is also hard-coded in the tf-proxmox-template
  # deploy script (data-model.nix) — confirm they are kept in sync.
  boot.uki.name = "monkey";
  # fileSystems."/".device = "/dev/disk/by-label/nixos";
  # https://nixos.org/manual/nixos/stable/#sec-image-repart
  # https://x86.lol/generic/2024/08/28/systemd-sysupdate.html
  image.repart =
    let
      efiArch = pkgs.stdenv.hostPlatform.efiArch;
    in
    {
      name = config.boot.uki.name;
      # name = "image";
      # split = true;
      partitions = {
        "esp" = {
          # The contents to end up in the filesystem image.
          contents = {
            # "/EFI/BOOT/BOOTX64.EFI".source = "${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
            "/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
              "${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
            "/EFI/Linux/${config.system.boot.loader.ukiFile}".source =
              "${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
            # https://man.archlinux.org/man/loader.conf.5
            # NOTE(review): this writes loader.conf under /loader/entries/ while
            # another loader.conf is written to /loader/ below — confirm which
            # one systemd-boot is meant to read (entries/ normally holds
            # per-entry .conf files, not loader.conf).
            "/loader/entries/loader.conf".source = pkgs.writeText "loader.conf" ''
              timeout 0
              editor yes
              default *
              logLevel=debug
            '';
            # "/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
            #   timeout 0
            #   editor yes
            #   default *
            #   logLevel=debug
            # '';
            # nixos-*.conf
            # "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
            #   title NixOS
            #   linux /EFI/nixos/kernel.efi
            #   initrd /EFI/nixos/initrd.efi
            #   options init=/nix/store/.../init root=LABEL=nixos
            # '';
            # systemd-boot configuration
            # NOTE(review): "$out" here is a literal derivation name, not a
            # variable reference — presumably meant to be "loader.conf"; confirm.
            "/loader/loader.conf".source = (
              pkgs.writeText "$out" ''
                timeout 3
              ''
            );
          };
          # https://www.man7.org/linux//man-pages/man5/repart.d.5.html
          repartConfig = {
            Priority = 1;
            Type = "esp";
            MountPoint = "/boot";
            Format = "vfat";
            # fixed UUID so fileSystems."/boot" above can reference it
            UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa";
            SizeMinBytes = "500M";
            SizeMaxBytes = "500M";
          };
          # repartConfig = {
          #   Type = "esp";
          #   UUID = "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"; # Well known
          #   Format = "vfat";
          #   SizeMinBytes = "256M";
          #   SplitName = "-";
          # };
        };
        "root" = {
          # copy the system closure into the root filesystem
          storePaths = [ config.system.build.toplevel ];
          repartConfig = {
            Priority = 2;
            Type = "root";
            Label = "nixos";
            MountPoint = "/";
            Format = "ext4";
            # fixed UUID so fileSystems."/" above can reference it
            UUID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb";
            # populates the fs twice
            Minimize = "guess";
            # Minimize = "off";
            # SizeMinBytes = "1G";
            # SizeMaxBytes = "20G";
          };
          # "store" = {
          #   storePaths = [ config.system.build.toplevel ];
          #   stripNixStorePrefix = true;
          #   repartConfig = {
          #     Type = "linux-generic";
          #     Label = "store_${config.system.image.version}";
          #     Format = "squashfs";
          #     Minimize = "off";
          #     ReadOnly = "yes";
          #     SizeMinBytes = "1G";
          #     SizeMaxBytes = "1G";
          #     SplitName = "store";
          #   };
          # };
          # # Placeholder for the second installed Nix store.
          # "store-empty" = {
          #   repartConfig = {
          #     Type = "linux-generic";
          #     Label = "_empty";
          #     Minimize = "off";
          #     SizeMinBytes = "1G";
          #     SizeMaxBytes = "1G";
          #     SplitName = "-";
          #   };
          # };
          # # Persistent storage
          # "var" = {
          #   repartConfig = {
          #     Type = "var";
          #     UUID = "4d21b016-b534-45c2-a9fb-5c16e091fd2d"; # Well known
          #     Format = "xfs";
          #     Label = "nixos-persistent";
          #     Minimize = "off";
          #     # Has to be large enough to hold update files.
          #     SizeMinBytes = "2G";
          #     SizeMaxBytes = "2G";
          #     SplitName = "-";
          #     # Wiping this gives us a clean state.
          #     FactoryReset = "yes";
          #   };
          # };
        };
      };
    };
}

View file

@ -1,4 +1,4 @@
{ ... }:
{ lib, ... }:
{
_class = "nixos";
@ -11,6 +11,14 @@
# ];
boot = {
loader = {
systemd-boot.enable = true;
efi = {
canTouchEfiVariables = true;
efiSysMountPoint = "/boot";
};
grub.enable = false;
};
initrd = {
availableKernelModules = [
"ata_piix"
@ -22,24 +30,35 @@
};
};
fileSystems."/boot" = {
fsType = "vfat";
device = lib.mkDefault "/dev/sda1";
options = [
"fmask=0022"
"dmask=0022"
];
};
disko.devices.disk.main = {
device = "/dev/sda";
type = "disk";
imageSize = "20G"; # needed for image generation
content = {
type = "gpt";
partitions = {
MBR = {
priority = 0;
size = "1M";
type = "EF02";
};
# mbr = {
# priority = 0;
# size = "1M";
# type = "EF02";
# };
ESP = {
esp = {
priority = 1;
size = "500M";
type = "EF00";
label = "boot";
content = {
type = "filesystem";
format = "vfat";

View file

@ -202,9 +202,22 @@
},
"branch": "main",
"submodules": false,
"revision": "48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/48f39fbe2e8f90f9ac160dd4b6929f3ac06d8223.tar.gz",
"hash": "0606qcs8x1jwckd1ivf52rqdmi3lkn66iiqh6ghd4kqx0g2bw3nv"
"revision": "ce8768f43b4374287cd8b88d8fa9c0061e749d9a",
"url": "https://github.com/SaumonNet/proxmox-nixos/archive/ce8768f43b4374287cd8b88d8fa9c0061e749d9a.tar.gz",
"hash": "116zplxh64wxbq81wsfkmmssjs1l228kvhxfi9d434xd54k6vr35"
},
"terraform-provider-proxmox": {
"type": "Git",
"repository": {
"type": "GitHub",
"owner": "bpg",
"repo": "terraform-provider-proxmox"
},
"branch": "main",
"submodules": false,
"revision": "891066821bf7993a5006b12a44c5b36dbdb852d8",
"url": "https://github.com/bpg/terraform-provider-proxmox/archive/891066821bf7993a5006b12a44c5b36dbdb852d8.tar.gz",
"hash": "0nh1b1mgkycjib2hfzgmq142kgklnnhk4rci4339pfgqfi1z841a"
}
},
"version": 5