bootable VM by repart

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
Kiara Grouwstra 2025-09-23 13:09:52 +02:00
parent b50bb442f7
commit 6426e70b84
Signed by: kiara
SSH key fingerprint: SHA256:COspvLoLJ5WC5rFb9ZDe5urVCkK4LJZOsjfF4duRJFU
11 changed files with 600 additions and 215 deletions

View file

@ -34,5 +34,9 @@ in
description = "the name of the ProxmoX node to use.";
type = types.str;
};
vm-names = mkOption {
description = "The names of VMs to provision.";
type = types.listOf types.str;
};
};
}

View file

@ -27,6 +27,7 @@ let
proxmox-user
proxmox-password
node-name
vm-names
;
inherit (lib) mkOption types;
eval =
@ -123,9 +124,10 @@ let
imports = [
./data-model-options.nix
../common/sharedOptions.nix
# ../common/targetNode.nix
# Tests need this; outside of tests, this (and especially its import of nixos-test-base) must not be used.
../common/targetNode.nix
"${nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# ../../../infra/common/nixos/repart.nix
../../../infra/common/nixos/repart.nix
# disko needed in makeInstallerIso.nix
# "${sources.disko}/module.nix"
# ../../../infra/common/proxmox-qemu-vm.nix
@ -250,6 +252,38 @@ let
};
};
};
single-nixos-vm-bash-proxmox = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
{
required-resources,
deployment-name,
}:
{
bash-proxmox-host = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
# ssh = {
# username = "root";
# host = nodeName;
# key-file = null;
# inherit sshOpts;
# };
module = self;
inherit
args
deployment-name
# proxmox-host
proxmox-user
proxmox-password
node-name
vm-names
;
proxmox-host = nodeName;
root-path = pathToRoot;
};
};
};
single-nixos-vm-tf-proxmox = environment: {
resources."operator-environment".login-shell.username = "operator";
implementation =
@ -322,6 +356,17 @@ let
configuration = config."example-configuration";
};
};
"bash-proxmox-deployment" =
let
env = config.environments."single-nixos-vm-bash-proxmox";
in
mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "bash-proxmox-deployment";
configuration = config."example-configuration";
};
};
"tf-proxmox-deployment" =
let
env = config.environments."single-nixos-vm-tf-proxmox";

View file

@ -41,7 +41,7 @@ in
## Memory use is expected to be dominated by the NixOS evaluation,
## which happens on the deployer.
memorySize = 4 * 1024;
diskSize = 4 * 1024;
diskSize = 32 * 1024;
cores = 2;
};

View file

@ -16,6 +16,7 @@ in
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
# FIXME uncomment this when using test over `nix run`
# (modulesPath + "/../lib/testing/nixos-test-base.nix")
./sharedOptions.nix
];
@ -42,7 +43,7 @@ in
networking.firewall.allowedTCPPorts = [ 22 ];
## Test VMs don't have a bootloader by default.
# Test VMs don't have a bootloader by default.
boot.loader.grub.enable = false;
}

View file

@ -5,11 +5,10 @@
...
}:
let
inherit (import ./constants.nix) pathToRoot pathFromRoot;
inherit (pkgs) system;
deployment-config = {
inherit pathToRoot pathFromRoot;
nodeName = "mypve";
inherit (import ./constants.nix) pathToRoot;
nodeName = "pve";
targetSystem = system;
sshOpts = [ ];
proxmox-user = "root@pam";
@ -29,27 +28,35 @@ let
url = "https://releases.nixos.org/nixos/24.05/nixos-24.05.7139.bcba2fbf6963/nixos-minimal-24.05.7139.bcba2fbf6963-x86_64-linux.iso";
hash = "sha256-plre/mIHdIgU4xWU+9xErP+L4i460ZbcKq8iy2n4HT8=";
};
# machine =
# (import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system;
# inherit (sources) nixpkgs;
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/proxmox.nix";
# configuration = deployment.nixos-configuration; # /nix/store/9nl9q95lvhbr86ys0q2xakr844cg9vym-nixos-generators-1.8.0/share/nixos-generator/configuration.nix
# }).config;
# .config.system.build.toplevel.drvPath
gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
inherit system;
inherit (sources) nixpkgs;
proxmox = {
formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/proxmox.nix";
formatAttr = "VMA";
fileExtension = ".vma.zst";
};
format = proxmox;
# qcow = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow.nix";
# formatAttr = "qcow";
# fileExtension = ".qcow2";
# };
# format = qcow;
# qcow-efi = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow-efi.nix";
# formatAttr = "qcow-efi";
# fileExtension = ".qcow2";
# };
# format = qcow-efi;
inherit (format) formatConfig; # formatAttr fileExtension
# TODO: parameterize things so this can flow into the Terraform configuration.
# Note: a qcow image can be produced by nixos-generators (qcow, qcow-efi) or by `image.repart`.
# Open question: if the image for the NixOS configuration is generated from the data model, how is it then propagated to the deployment?
gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
inherit system formatConfig;
inherit (sources) nixpkgs;
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/proxmox.nix";
configuration = deployment.nixos-configuration;
};
machine =
# lib.trace ".config: ${builtins.toString (lib.attrNames gen.config)}"
# lib.trace ".config.system: ${builtins.toString (lib.attrNames gen.config.system)}"
# lib.trace ".config.system.build: ${builtins.toString (lib.attrNames gen.config.system.build)}"
# lib.trace ".config.system.build.toplevel: ${builtins.toString (lib.attrNames gen.config.system.build.toplevel)}"
# lib.trace ".config.system.build.toplevel.drvPath: ${builtins.toString (lib.attrNames gen.config.system.build.toplevel.drvPath)}"
gen.config;
machine = gen.config;
in
{
_class = "nixosTest";
@ -62,7 +69,7 @@ in
../../run/tf-proxmox/run.sh
];
nodes.mypve =
nodes.pve =
{ sources, ... }:
{
imports = [
@ -89,7 +96,7 @@ in
};
virtualisation = {
additionalPaths = [ minimalIso ];
diskSize = 4096;
diskSize = 2 * 1024;
memorySize = 2048;
};
};
@ -242,15 +249,15 @@ in
};
extraTestScript = ''
mypve.wait_for_unit("pveproxy.service")
assert "running" in mypve.succeed("pveproxy status")
mypve.succeed("mkdir -p /run/pve")
assert "Proxmox" in mypve.succeed("curl -s -i -k https://localhost:8006")
pve.wait_for_unit("pveproxy.service")
assert "running" in pve.succeed("pveproxy status")
pve.succeed("mkdir -p /run/pve")
assert "Proxmox" in pve.succeed("curl -s -i -k https://localhost:8006")
# mypve.succeed("pvesh set /access/password --userid root@pam --password mypwdlol --confirmation-password mytestpw 1>&2")
# mypve.succeed("curl -s -i -k -d '{\"userid\":\"root@pam\",\"password\":\"mypwdhaha\",\"confirmation-password\":\"mypwdlol\"}' -X PUT https://localhost:8006/api2/json/access/password 1>&2")
# pve.succeed("pvesh set /access/password --userid root@pam --password mypwdlol --confirmation-password mytestpw 1>&2")
# pve.succeed("curl -s -i -k -d '{\"userid\":\"root@pam\",\"password\":\"mypwdhaha\",\"confirmation-password\":\"mypwdlol\"}' -X PUT https://localhost:8006/api2/json/access/password 1>&2")
# on mistake: 401 No ticket
# mypve.succeed("haha")
# pve.succeed("haha")
with subtest("Run the deployment"):
# target.fail("hello 1>&2")

View file

@ -276,16 +276,15 @@ let
};
});
};
tf-proxmox-host = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
type = submodule (tf-host: {
bash-proxmox-host = mkOption {
description = "A bash deployment by SSH to create or update a NixOS VM in ProxmoX.";
type = submodule (bash-proxmox-host: {
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
ssh = host-ssh;
# TODO: add proxmox info
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
@ -303,6 +302,15 @@ let
description = "The path to the root of the repository.";
type = types.path;
};
proxmox-host = mkOption {
description = "The host of the ProxmoX instance to use.";
type = types.str;
default = "192.168.51.81";
};
vm-names = mkOption {
description = "The names of VMs to provision.";
type = types.listOf types.str;
};
proxmox-user = mkOption {
description = "The ProxmoX user to use.";
type = types.str;
@ -317,54 +325,180 @@ let
description = "the name of the ProxmoX node to use.";
type = types.str;
};
# nixos-conf = mkOption {
# type = types.str;
# default = writeConfig {
# inherit (tf-host.config)
# system
# module
# args
# deployment-name
# root-path
# ;
# deployment-type = "tf-proxmox-host";
# };
# };
run = mkOption {
type = types.package;
# error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
# error: The option `.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
inherit (tf-host.config)
deployment-type = "bash-proxmox-host";
inherit (bash-proxmox-host.config)
system
ssh
module
args
deployment-name
root-path
node-name
proxmox-host
proxmox-user
proxmox-password
node-name
# nixos-conf
vm-names
;
inherit (ssh)
host
username
key-file
sshOpts
;
environment = {
key_file = key-file;
ssh_opts = sshOpts;
nixos_conf = writeConfig {
inherit
host
system
module
args
deployment-name
root-path
deployment-type
;
proxmox_user = proxmox-user;
proxmox_password = proxmox-password;
ssh_user = username;
node_name = node-name;
# nixos_conf = nixos-conf;
};
in
pkgs.writers.writeBashBin "provision-proxmox.sh"
(withPackages [
pkgs.httpie
pkgs.jq
])
''
bash ./infra/proxmox-remove.sh \
--api-url "https://${proxmox-host}:8006/api2/json" \
--username "${proxmox-user}" \
--password "${proxmox-password}" \
--node "${node-name}" \
7014
# ^ hardcoded ID of test14
# ${lib.concatStringsSep " " vm-names}
bash ./infra/proxmox-provision.sh \
--api-url "https://${proxmox-host}:8006/api2/json" \
--username "${proxmox-user}" \
--password "${proxmox-password}" \
--node "${node-name}" \
${
# lib.concatStringsSep " " vm-names
lib.concatStringsSep " " (lib.lists.map (k: "${k}:${nixos_conf}") vm-names)
}
# ${lib.concatStringsSep " " vm-names}
'';
};
};
});
};
tf-proxmox-host = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
# type = submodule (tf-host: {
type = submodule (
tf-host:
let
raw = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/raw.nix";
formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/raw-efi.nix";
formatAttr = "raw";
fileExtension = ".img";
};
format = raw;
# qcow = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow.nix";
# formatAttr = "qcow";
# fileExtension = ".qcow2";
# };
# format = qcow;
# qcow-efi = {
# formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/qcow-efi.nix";
# formatAttr = "qcow-efi";
# fileExtension = ".qcow2";
# };
# format = qcow-efi;
inherit (format) formatConfig fileExtension formatAttr;
# inherit (format) formatConfig fileExtension; # formatAttr
in
{
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
ssh = host-ssh;
# TODO: add proxmox info
module = mkOption {
description = "The module to call to obtain the NixOS configuration from.";
type = types.str;
};
args = mkOption {
description = "The arguments with which to call the module to obtain the NixOS configuration.";
type = types.attrs;
};
deployment-name = mkOption {
description = "The name of the deployment for which to obtain the NixOS configuration.";
type = types.str;
};
root-path = mkOption {
description = "The path to the root of the repository.";
type = types.path;
};
proxmox-user = mkOption {
description = "The ProxmoX user to use.";
type = types.str;
default = "root@pam";
};
# TODO: is sensitivity here handled properly?
proxmox-password = mkOption {
description = "The ProxmoX password to use.";
type = types.str;
};
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
run = mkOption {
type = types.package;
# error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
inherit (tf-host.config)
system
ssh
module
args
deployment-name
root-path
proxmox-user
proxmox-password
node-name
;
# image = let
# # TODO: parameterize things so this can flow into the Terraform configuration.
# # Note: a qcow image can be produced by nixos-generators (qcow, qcow-efi) or by `image.repart`.
# # Open question: if the image for the NixOS configuration is generated from the data model, how is it then propagated to the deployment?
# gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system formatConfig;
# inherit (sources) nixpkgs;
# # configuration = import "${pkgs.nixos-generators}/share/nixos-generator/configuration.nix";
# # formatConfig = "${pkgs.nixos-generators}/share/nixos-generator/formats/proxmox.nix";
# configuration = tf-host.config.nixos-configuration;
# # configuration = {
# # imports = [
# # # "${pkgs.nixos-generators}/share/nixos-generator/configuration.nix"
# # # "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
# # # "${sources.agenix}/modules/age.nix"
# # # "${sources.disko}/module.nix"
# # # "${sources.home-manager}/nixos"
# # ];
# # };
# };
# machine = gen.config;
# in
# machine.system.build.${formatAttr};
inherit (ssh)
host
username
key-file
sshOpts
;
deployment-type = "tf-proxmox-host";
nixos_conf = writeConfig {
inherit
system
@ -372,25 +506,79 @@ let
args
deployment-name
root-path
deployment-type
;
deployment-type = "tf-proxmox-host";
};
};
tf-env = pkgs.callPackage ./run/tf-proxmox/tf-env.nix { };
in
pkgs.writers.writeBashBin "deploy-tf-proxmox.sh"
(withPackages [
pkgs.jq
pkgs.nixos-generators
(pkgs.callPackage ./run/tf-proxmox/tf.nix { inherit sources; })
])
''
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox/run.sh
'';
environment = {
key_file = key-file;
ssh_opts = sshOpts;
inherit
host
nixos_conf
;
proxmox_user = proxmox-user;
proxmox_password = proxmox-password;
ssh_user = username;
node_name = node-name;
# image = "${image}/nixos${fileExtension}";
# image = "${image}/nixos.img";
};
# image = "${image}/nixos${fileExtension}";
# image = "${image}/nixos.img";
tf-env = pkgs.callPackage ./run/tf-proxmox/tf-env.nix { };
proxmox-host = "192.168.51.81"; # root@fediversity-proxmox
vm-names = [ "test14" ];
vm_name = "test14";
in
pkgs.writers.writeBashBin "deploy-tf-proxmox.sh"
(withPackages [
pkgs.jq
pkgs.qemu
pkgs.nixos-generators
pkgs.httpie
(pkgs.callPackage ./run/tf-proxmox/tf.nix { inherit sources; })
])
''
set -xe
# bash ./infra/proxmox-remove.sh \
# --api-url "https://${proxmox-host}:8006/api2/json" \
# --username "${proxmox-user}" \
# --password "${proxmox-password}" \
# --node "${node-name}" \
# 7014
# # ^ hardcoded ID of test14
# # ${lib.concatStringsSep " " vm-names}
# TODO after install: $nix_host_keys
# cp $tmpdir/${vm_name}_host_key /mnt/etc/ssh/ssh_host_ed25519_key
# chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
qemu-img convert -f raw -O qcow2 -C ${(import nixos_conf).config.system.build.image}/monkey.raw /tmp/disk.qcow2
exit 1
# TF_VAR_image=/tmp/disk.qcow2 \
env ${
toString (
lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") (
lib.trace (lib.strings.toJSON environment) environment
)
)
} \
TF_VAR_image=/tmp/disk.qcow2 \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox/run.sh
# env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
# tf_env=${tf-env} bash ./deployment/run/tf-proxmox/run.sh
'';
};
};
};
});
}
);
};
};
in

View file

@ -42,6 +42,11 @@
inherit inputs sources;
};
deployment-model-bash-proxmox = import ./check/data-model-bash-proxmox {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources system;
};
deployment-model-tf-proxmox = import ./check/data-model-tf-proxmox {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources system;

View file

@ -8,9 +8,12 @@ terraform {
}
locals {
dump_name = "vzdump-qemu-nixos-fediversity-${var.category}.vma.zst"
# dump_name = "vzdump-qemu-nixos-fediversity-${var.category}.vma.zst"
dump_name = "vzdump-qemu-nixos-fediversity-${var.category}.raw"
# dump_name = "vzdump-qemu-nixos-fediversity-${var.category}.qcow2"
}
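# Note on the extension: the upload below uses `content_type = "import"`, which
# only accepts .raw, .qcow2, or .vmdk files, whereas .vma.zst is only valid for
# `backup` content; hence the switch from the .vma.zst dump name to .raw.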
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
endpoint = "https://${var.host}:8006/"
insecure = true
@ -36,86 +39,82 @@ provider "proxmox" {
# csrf_prevention_token = var.virtual_environment_csrf_prevention_token
}
# FIXME move to host
# FIXME add proxmox
data "external" "base-hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ${path.module}/../common/nixos/base.nix)\\\"}\""]
}
# # FIXME move to host
# # FIXME add proxmox
# data "external" "base-hash" {
# program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ${path.module}/../common/nixos/base.nix)\\\"}\""]
# }
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
}
# FIXME move to host
resource "terraform_data" "template" {
# triggers_replace = [
# data.external.base-hash.result,
# ]
provisioner "local-exec" {
working_dir = path.root
# FIXME configure to use actual base image
command = <<-EOF
set -xeuo pipefail
# XXX nixos-generate needs NIX_PATH to have `nixpkgs` set!
nixos-generate -f proxmox -o /tmp/nixos-image
# the above makes /tmp/nixos-image read-only, so our stable file name needs a different directory
mkdir -p /tmp/proxmox-image
ln -sf "$(ls /tmp/nixos-image/vzdump-qemu-nixos-*.vma.zst)" /tmp/proxmox-image/${local.dump_name}
EOF
}
}
# # hash of our code directory, used to trigger re-deploy
# # FIXME calculate separately to reduce false positives
# data "external" "hash" {
# program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
# }
# FIXME move to host
# FIXME: switch to a base image shared between jobs, as the upload seems to be a bottleneck, e.g. via:
# - recursive TF
# - a hash in the file name instead of overwriting
resource "proxmox_virtual_environment_file" "upload" {
lifecycle {
replace_triggered_by = [
terraform_data.template,
]
}
# # https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts
# timeouts {
# create = "60m"
# }
content_type = "backup"
# content_type - (Optional) The content type. If not specified, the content type will be inferred from the file extension. Valid values are:
# backup (allowed extensions: .vzdump, .tar.gz, .tar.xz, tar.zst)
# iso (allowed extensions: .iso, .img)
# snippets (allowed extensions: any)
# import (allowed extensions: .raw, .qcow2, .vmdk)
# vztmpl (allowed extensions: .tar.gz, .tar.xz, tar.zst)
# content_type = "backup"
content_type = "import"
# https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
# PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
# that UI action also records it in `/etc/pve/storage.cfg`
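# A sketch of the resulting stanza (illustrative; the path and content list
# depend on the existing storage configuration):
#   dir: local
#           path /var/lib/vz
#           content backup,iso,snippets,import,images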
datastore_id = "local"
# datastore_id = "local-lvm"
# datastore_id = "backup"
node_name = var.node_name
overwrite = true
# timeout_upload = 3600
timeout_upload = 1
source_file {
path = "/tmp/proxmox-image/${local.dump_name}"
# path = "/tmp/proxmox-image/${local.dump_name}"
path = var.image
file_name = local.dump_name
}
}
# FIXME distinguish var.category
data "proxmox_virtual_environment_vms" "nixos_base" {
node_name = var.node_name
filter {
name = "template"
values = [true]
}
# filter {
# name = "node_name"
# values = ["nixos-base"]
# }
}
# resource "proxmox_virtual_environment_download_file" "latest_ubuntu_22_jammy_qcow2_img" {
# content_type = "import"
# datastore_id = "local"
# node_name = var.node_name
# url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# # need to rename the file to *.qcow2 to indicate the actual file format for import
# file_name = "jammy-server-cloudimg-amd64.qcow2"
# }
resource "proxmox_virtual_environment_vm" "nix_vm" {
lifecycle {
replace_triggered_by = [
proxmox_virtual_environment_file.upload,
]
}
# lifecycle {
# replace_triggered_by = [
# proxmox_virtual_environment_file.upload,
# ]
# }
node_name = var.node_name
pool_id = var.pool_id
description = var.description
started = true
agent {
enabled = true
}
# # https://wiki.nixos.org/wiki/Virt-manager#Guest_Agent
# # services.qemuGuest.enable = true;
# # QEMU guest agent is not running
# agent {
# enabled = true
# # timeout = "15m"
# }
cpu {
type = "x86-64-v2-AES"
@ -128,26 +127,32 @@ resource "proxmox_virtual_environment_vm" "nix_vm" {
dedicated = var.memory
}
efi_disk {
datastore_id = "linstor_storage"
type = "4m"
}
disk {
datastore_id = "linstor_storage"
# datastore_id = "linstor_storage"
datastore_id = "local"
file_format = "raw"
interface = "scsi0"
discard = "on"
iothread = true
size = var.disk_size
ssd = true
backup = false
cache = "none"
# BdsDxe: failed to load Boot0001 "UEFI QEMU QEMU HARDDISK " from PciRoot(0x0)/Pci(0x5,0x0)/Pci(0x1,0x0)/Scsi(0x0,0x0): Not Found
# BdsDxe: No bootable option or device was found.
# BdsDxe: Press any key to enter the Boot Manager Menu.
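# The messages above suggest the firmware found no EFI boot entry on the
# imported disk; the repart-based image added in this commit (an ESP with
# systemd-boot and a UKI) is what should make the imported disk bootable.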
# import_from = "local:import/vzdump-qemu-nixos-fediversity-test.qcow2"
# import_from = "local:import/vzdump-qemu-nixos-fediversity-test.raw"
import_from = proxmox_virtual_environment_file.upload.id
# import_from = proxmox_virtual_environment_download_file.latest_ubuntu_22_jammy_qcow2_img.id
}
clone {
efi_disk {
# datastore_id = "linstor_storage"
datastore_id = "local"
node_name = data.proxmox_virtual_environment_vms.nixos_base.vms[0].node_name # invalid index: empty list
# node_name = var.node_name
vm_id = data.proxmox_virtual_environment_vms.nixos_base.vms[0].vm_id
full = true
file_format = "raw"
type = "4m"
}
network_device {

View file

@ -42,6 +42,11 @@ variable "ssh_opts" {
default = "[]"
}
variable "image" {
# description = ""
type = string
}
#########################################
variable "category" {

View file

@ -1,69 +1,194 @@
{ config, pkgs, modulesPath, ... }:
{
config,
pkgs,
lib,
modulesPath,
...
}:
{
imports = [ "${modulesPath}/image/repart.nix" ];
imports = [
"${modulesPath}/image/repart.nix"
];
fileSystems."/".device = "/dev/disk/by-label/nixos";
fileSystems = {
# "/" = {
# fsType = "tmpfs";
# options = [
# "size=20%"
# ];
# };
"/" =
let
partConf = config.image.repart.partitions."root".repartConfig;
in
{
device = "/dev/disk/by-partuuid/${partConf.UUID}";
fsType = partConf.Format;
};
# "/var" =
# let
# partConf = config.image.repart.partitions."var".repartConfig;
# in
# {
# device = "/dev/disk/by-partuuid/${partConf.UUID}";
# fsType = partConf.Format;
# };
"/boot" =
let
partConf = config.image.repart.partitions."esp".repartConfig;
in
{
device = "/dev/disk/by-partuuid/${partConf.UUID}";
fsType = partConf.Format;
};
# "/nix/store" =
# let
# partConf = config.image.repart.partitions."store".repartConfig;
# in
# {
# device = "/dev/disk/by-partlabel/${partConf.Label}";
# fsType = partConf.Format;
# };
};
boot.uki.name = "monkey";
# fileSystems."/".device = "/dev/disk/by-label/nixos";
# https://nixos.org/manual/nixos/stable/#sec-image-repart
# https://x86.lol/generic/2024/08/28/systemd-sysupdate.html
image.repart = {
name = "image";
partitions = {
"esp" = {
# The contents to end up in the filesystem image.
contents = {
"/EFI/BOOT/BOOTX64.EFI".source = "${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
# https://man.archlinux.org/man/loader.conf.5
"/loader/entries/loader.conf".source = pkgs.writeText "loader.conf" ''
timeout 0
editor yes
default *
logLevel=debug
'';
"/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
timeout 0
editor yes
default *
logLevel=debug
'';
# nixos-*.conf
# "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
# title NixOS
# linux /EFI/nixos/kernel.efi
# initrd /EFI/nixos/initrd.efi
# options init=/nix/store/.../init root=LABEL=nixos
# '';
image.repart =
let
efiArch = pkgs.stdenv.hostPlatform.efiArch;
in
{
name = config.boot.uki.name;
# name = "image";
# split = true;
partitions = {
"esp" = {
# The contents to end up in the filesystem image.
contents = {
# "/EFI/BOOT/BOOTX64.EFI".source = "${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
"/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
"${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
"/EFI/Linux/${config.system.boot.loader.ukiFile}".source =
"${config.system.build.uki}/${config.system.boot.loader.ukiFile}";
# https://man.archlinux.org/man/loader.conf.5
"/loader/entries/loader.conf".source = pkgs.writeText "loader.conf" ''
timeout 0
editor yes
default *
logLevel=debug
'';
# "/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
# timeout 0
# editor yes
# default *
# logLevel=debug
# '';
# nixos-*.conf
# "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
# title NixOS
# linux /EFI/nixos/kernel.efi
# initrd /EFI/nixos/initrd.efi
# options init=/nix/store/.../init root=LABEL=nixos
# '';
# systemd-boot configuration
# NB: the store-path name must not contain "$", so give the file a plain name.
"/loader/loader.conf".source = pkgs.writeText "loader.conf" ''
timeout 3
'';
};
# https://www.man7.org/linux//man-pages/man5/repart.d.5.html
repartConfig = {
Priority = 1;
Type = "esp";
MountPoint = "/boot";
Format = "vfat";
UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa";
SizeMinBytes = "500M";
SizeMaxBytes = "500M";
};
# repartConfig = {
# Type = "esp";
# UUID = "c12a7328-f81f-11d2-ba4b-00a0c93ec93b"; # Well known
# Format = "vfat";
# SizeMinBytes = "256M";
# SplitName = "-";
# };
};
# https://www.man7.org/linux//man-pages/man5/repart.d.5.html
repartConfig = {
Priority = 1;
Type = "esp";
MountPoint = "/boot";
Format = "vfat";
UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa";
SizeMinBytes = "500M";
SizeMaxBytes = "500M";
};
};
"root" = {
storePaths = [ config.system.build.toplevel ];
repartConfig = {
Priority = 2;
Type = "root";
Label = "nixos";
MountPoint = "/";
Format = "ext4";
UUID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb";
# populates the fs twice
Minimize = "guess";
# Minimize = "off";
# SizeMinBytes = "1G";
# SizeMaxBytes = "20G";
"root" = {
storePaths = [ config.system.build.toplevel ];
repartConfig = {
Priority = 2;
Type = "root";
Label = "nixos";
MountPoint = "/";
Format = "ext4";
UUID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb";
# populates the fs twice
Minimize = "guess";
# Minimize = "off";
# SizeMinBytes = "1G";
# SizeMaxBytes = "20G";
};
# "store" = {
# storePaths = [ config.system.build.toplevel ];
# stripNixStorePrefix = true;
# repartConfig = {
# Type = "linux-generic";
# Label = "store_${config.system.image.version}";
# Format = "squashfs";
# Minimize = "off";
# ReadOnly = "yes";
# SizeMinBytes = "1G";
# SizeMaxBytes = "1G";
# SplitName = "store";
# };
# };
# # Placeholder for the second installed Nix store.
# "store-empty" = {
# repartConfig = {
# Type = "linux-generic";
# Label = "_empty";
# Minimize = "off";
# SizeMinBytes = "1G";
# SizeMaxBytes = "1G";
# SplitName = "-";
# };
# };
# # Persistent storage
# "var" = {
# repartConfig = {
# Type = "var";
# UUID = "4d21b016-b534-45c2-a9fb-5c16e091fd2d"; # Well known
# Format = "xfs";
# Label = "nixos-persistent";
# Minimize = "off";
# # Has to be large enough to hold update files.
# SizeMinBytes = "2G";
# SizeMaxBytes = "2G";
# SplitName = "-";
# # Wiping this gives us a clean state.
# FactoryReset = "yes";
# };
# };
};
};
};
};
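# A minimal sketch of building the image produced by this module on its own,
# assuming `$configuration` evaluates to a full NixOS system (as the generated
# configuration files used elsewhere in this commit do):
#   nix --extra-experimental-features 'nix-command' build \
#     --impure --expr "(import $configuration).config.system.build.image"
# The result contains the raw disk image (named after `boot.uki.name`, i.e.
# `monkey.raw` here), which can then be converted with
# `qemu-img convert -f raw -O qcow2 ...`.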
# disko.devices.disk.main = {
# device = "/dev/sda";

View file

@ -234,7 +234,7 @@ build_iso () {
nix_host_keys=
fi
nix --extra-experimental-features 'nix-command flakes' build \
# nix --extra-experimental-features 'nix-command flakes' build \
# --impure --expr "
# let flake = builtins.getFlake (builtins.toString ./.); in
# import ./infra/makeInstallerIso.nix {
@ -263,7 +263,7 @@ build_iso () {
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
# nix --extra-experimental-features 'nix-command' build \
nix --extra-experimental-features 'nix-command' build \
--impure --expr "
(import $configuration).config.system.build.image
" \