terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "= 0.76.1"
    }
  }
}

locals {
  system    = "x86_64-linux"
  node_name = "node051"
  dump_name = "vzdump-qemu-nixos-fediversity-${var.category}.vma.zst"

  # dependency paths pre-calculated from npins
  pins = jsondecode(file("${path.module}/.npins.json"))

  # nix path: expose pins, use nixpkgs in flake commands (`nix run`)
  nix_path = "${join(":", [for name, dir in local.pins : "${name}=${dir}"])}:flake=${local.pins["nixpkgs"]}:flake"
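  # The pins map pin names to source directories, so nix_path ends up looking
  # like, e.g. (store paths illustrative):
  # "nixpkgs=/nix/store/...-source:flake=/nix/store/...-source:flake"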

  config_tf = merge(var.config_tf, {
  })

  # FIXME pass IP from generated VM
  # vm_host = "${var.hostname}.${var.vm_domain}"
  # vm_host = "${proxmox_virtual_environment_vm.nix_vm.ipv4_addresses[0]}"
  vm_host = "fedi202.abundos.eu"
}

# FIXME move to host
# FIXME add proxmox
data "external" "base-hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ${path.module}/../common/nixos/base.nix)\\\"}\""]
}

# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
}
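
# Both "external" data sources above follow the external provider's protocol:
# the program prints a flat JSON object of strings on stdout, here of the
# shape {"hash":"..."} (whatever nix-hash prints for the given path), which is
# then read back as data.external.base-hash.result / data.external.hash.result.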

# FIXME move to host
resource "terraform_data" "template" {
  triggers_replace = [
    data.external.base-hash.result,
  ]

  provisioner "local-exec" {
    working_dir = path.root
    environment = {
      NIX_PATH = local.nix_path
    }
    # FIXME configure to use actual base image
    command = <<-EOF
      set -euo pipefail

      nixos-generate -f proxmox -o /tmp/nixos-image
      ln -s /tmp/nixos-image/vzdump-qemu-nixos-*.vma.zst /tmp/nixos-image/${local.dump_name}
    EOF
  }
}
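
# Note: `nixos-generate` above comes from the nixos-generators project and is
# assumed to be available on the machine running Terraform; with `-f proxmox`
# it produces a vzdump-qemu-*.vma.zst archive, which the upload resource below
# pushes to the Proxmox datastore.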

# FIXME move to host
resource "proxmox_virtual_environment_file" "upload" {
  lifecycle {
    replace_triggered_by = [
      terraform_data.template,
    ]
  }

  content_type = "images"
  datastore_id = "local"
  node_name    = local.node_name
  overwrite    = true

  source_file {
    path      = "/tmp/nixos-image/${local.dump_name}"
    file_name = local.dump_name
  }
}

# FIXME distinguish var.category
data "proxmox_virtual_environment_vms" "nixos_base" {
  node_name = local.node_name

  filter {
    name   = "template"
    values = [true]
  }
  # filter {
  #   name   = "node_name"
  #   values = ["nixos-base"]
  # }
}
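
# One possible way to address the FIXME above (illustrative only; it assumes
# the base templates are named per category, e.g. "nixos-base-test") would be
# an extra filter inside the data source:
#
# filter {
#   name   = "name"
#   values = ["nixos-base-${var.category}"]
# }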

resource "proxmox_virtual_environment_vm" "nix_vm" {
  lifecycle {
    replace_triggered_by = [
      proxmox_virtual_environment_file.upload,
    ]
  }

  node_name   = local.node_name
  pool_id     = "Fediversity"
  description = var.description
  started     = true

  agent {
    enabled = true
  }

  cpu {
    type    = "x86-64-v2-AES"
    cores   = var.cores
    sockets = var.sockets
    numa    = true
  }

  memory {
    dedicated = var.memory
  }

  efi_disk {
    datastore_id = "linstor_storage"
    type         = "4m"
  }

  disk {
    datastore_id = "linstor_storage"
    interface    = "scsi0"
    discard      = "on"
    iothread     = true
    size         = var.disk_size
    ssd          = true
  }

  clone {
    datastore_id = "local"
    node_name    = data.proxmox_virtual_environment_vms.nixos_base.vms[0].node_name
    vm_id        = data.proxmox_virtual_environment_vms.nixos_base.vms[0].vm_id
    full         = true
  }

  network_device {
    model  = "virtio"
    bridge = "vnet1306"
  }

  operating_system {
    type = "l26"
  }

  scsi_hardware = "virtio-scsi-single"
  bios          = "ovmf"
}

# TF resource to build and deploy NixOS instances.
resource "terraform_data" "nixos" {

  # Trigger a rebuild/deploy if (FIXME?) any potentially used config/code
  # changed, and skip both (20+ s, with the build as the bottleneck) when
  # nothing changed.
  # terraform-nixos separates the two steps so as to only deploy when the
  # instantiation changed, yet it still builds even then - which may not be as
  # bad when deploying on the remote.
  # Having build and deploy in one resource reflects preferring to prevent
  # no-op rebuilds over preventing (with fewer false positives) no-op
  # deployments, as I could not find a way to prevent no-op rebuilds without
  # merging them:
  # - generic resources cannot have outputs, while we want info back from the
  #   instantiation (unless we build on the host?).
  # - `data` sources always run, which is slow for the deploy and especially
  #   the build.
  triggers_replace = [
    data.external.hash.result,
    var.config_nix_base,
    var.config_nix,
    var.config_tf,
  ]

  provisioner "local-exec" {
    # Directory to run the script from. We use the TF project root dir,
    # expressed as a path relative to where TF is run from, matching calling
    # modules' expectations about config_nix locations. Note that absolute
    # paths can cause false positives in triggers, so they are generally
    # discouraged in TF.
    working_dir = path.root
    environment = {
      # nix path used for the build; lets us refer to e.g. nixpkgs as `<nixpkgs>`
      NIX_PATH = local.nix_path
    }
    # TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
    # TODO: build on target?
    command = <<-EOF
      set -euo pipefail

      # INSTANTIATE
      command=(
        nix-instantiate
        --expr
        'let
          os = import <nixpkgs/nixos> {
            system = "${local.system}";
            configuration = {
              # nix path for debugging
              nix.nixPath = [ "${local.nix_path}" ];
            }
            // ${var.config_nix_base}
            // ${var.config_nix}
            # template parameters passed in from TF through JSON
            // builtins.fromJSON "${replace(jsonencode(local.config_tf), "\"", "\\\"")}";
          };
        in
        # info we want to get back out
        {
          substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
          trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
          drv_path = os.config.system.build.toplevel.drvPath;
          out_path = os.config.system.build.toplevel;
        }'
      )
      # instantiate the config in /nix/store
      "$${command[@]}" -A out_path
      # get the other info
      json="$("$${command[@]}" --eval --strict --json)"
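
      # At this point $json holds the four attributes returned by the Nix
      # expression above, roughly of the shape (values illustrative):
      #   {"substituters":"https://cache.nixos.org/ ...",
      #    "trusted_public_keys":"cache.nixos.org-1:...",
      #    "drv_path":"/nix/store/...-nixos-system-....drv",
      #    "out_path":"/nix/store/...-nixos-system-..."}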

      # DEPLOY
      declare substituters trusted_public_keys drv_path
      # set our variables from the json object (jaq is a jq-compatible JSON processor)
      eval "export $(echo "$json" | jaq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
      host="root@${local.vm_host}" # FIXME: #24
      buildArgs=(
        --option extra-binary-caches https://cache.nixos.org/
        --option substituters $substituters
        --option trusted-public-keys $trusted_public_keys
      )
      sshOpts=(
        -o BatchMode=yes
        -o StrictHostKeyChecking=no
      )
      # get the realized derivation to deploy
      outPath=$(nix-store --realize "$drv_path" "$${buildArgs[@]}")
      # deploy the config to the host via nix-copy-closure
      NIX_SSHOPTS="$${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
      # switch the remote host to the new configuration
      ssh "$${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; $outPath/bin/switch-to-configuration switch"
    EOF
  }
}
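
# Example of how a calling module might instantiate this one (illustrative
# only; the source path and values are hypothetical, the variable names are
# the ones referenced above):
#
# module "nixos_vm" {
#   source          = "../proxmox-nixos"
#   category        = "test"
#   description     = "NixOS VM for testing"
#   cores           = 2
#   sockets         = 1
#   memory          = 4096
#   disk_size       = 32
#   config_nix_base = "{ }"
#   config_nix      = "{ services.openssh.enable = true; }"
#   config_tf       = {}
# }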