split proxmox upload/deploy

Signed-off-by: Kiara Grouwstra <kiara@procolix.eu>
This commit is contained in:
Kiara Grouwstra 2025-10-22 13:50:59 +02:00
parent cf24255ab9
commit 7ee1422f2b
Signed by: kiara
SSH key fingerprint: SHA256:COspvLoLJ5WC5rFb9ZDe5urVCkK4LJZOsjfF4duRJFU
10 changed files with 416 additions and 123 deletions

View file

@ -7,19 +7,30 @@
let
inherit (pkgs) system;
backendPort = builtins.toString 8080;
httpBackend = rec {
tfBackend = fragment: rec {
TF_HTTP_USERNAME = "basic";
TF_HTTP_PASSWORD = "fake-secret";
TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/project1/example";
TF_HTTP_LOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_UNLOCK_ADDRESS = TF_HTTP_ADDRESS;
TF_HTTP_ADDRESS = "http://localhost:${backendPort}/state/${fragment}";
};
# FIXME: generate the image that `nixos-generate` used to produce, but for a specific `-c configuration.nix` instead of the current generic configuration
deployment =
(import ./data-model.nix {
template-deployment =
(import ./setups/template.nix {
inherit sources system;
config = {
inherit httpBackend;
httpBackend = tfBackend "proxmox-test/upload";
nodeName = "pve";
targetSystem = system;
node-name = "pve";
imageDatastoreId = "local";
};
}).default.tf-proxmox-template;
vm-deployment =
(import ./setups/vm.nix {
inherit sources system;
config = {
httpBackend = tfBackend "proxmox-test/nixos";
inherit (import ./constants.nix) pathToRoot;
nodeName = "pve";
targetSystem = system;
@ -37,6 +48,8 @@ let
ipv4Address = "192.168.10.236/24";
ipv6Gateway = "";
ipv6Address = "";
# dynamically get the id from the template upload step
templateId = null;
};
}).default.tf-proxmox-vm;
in
@ -44,6 +57,7 @@ in
_class = "nixosTest";
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../run/tf-proxmox-template/run.sh
../../run/tf-proxmox-vm/run.sh
../../run/tf-proxmox-vm/await-ssh.sh
];
@ -110,10 +124,12 @@ in
];
environment.systemPackages = [
deployment.run
vm-deployment.run
template-deployment.run
pkgs.pve-manager
pkgs.openssl
pkgs.jq
(pkgs.callPackage ../../run/tf-proxmox-template/tf.nix { })
(pkgs.callPackage ../../run/tf-proxmox-vm/tf.nix { })
];
@ -155,12 +171,19 @@ in
cert = pve.succeed("cat /etc/pve/pve-root-ca.pem").strip()
# set up proxmox
pm_token = pve.succeed("""
set -e
pve.succeed("""
pvesh create /pools --poolid Fediversity
pvesh set /storage/local --content "vztmpl,rootdir,backup,snippets,import,iso,images" 1>/dev/null
pvesh create /access/users/root@pam/token/mytoken --output-format json | jq -r .value
pvesh set /access/acl --path "/" --token "root@pam!mytoken" --roles "PVEVMAdmin PVEDatastoreAdmin PVESDNUser PVETemplateUser"
""")
template_token = pve.succeed("""
pvesh create /access/users/root@pam/token/template --output-format json | jq -r .value
pvesh set /access/acl --path "/" --token "root@pam!template" --roles "PVEDatastoreAdmin"
""").strip()
vm_token = pve.succeed("""
pvesh create /access/users/root@pam/token/vm --output-format json | jq -r .value
pvesh set /access/acl --path "/" --token "root@pam!vm" --roles "PVEVMAdmin PVEDatastoreAdmin PVESDNUser"
""").strip()
# skip indent for EOF
@ -183,7 +206,7 @@ in
""")
deployer.succeed("""
set -xe
set -e
cd /etc/ssl/certs
{ cat ca-bundle.crt
cat ca-certificates.crt
@ -195,12 +218,23 @@ in
openssl verify -CApath /etc/ssl/certs ./pve-root-ca.pem
""")
deploy = f"""
with subtest("Deploy the template"):
template_id = deployer.succeed(f"""
ssh -o BatchMode=yes -o StrictHostKeyChecking=no pve "true"
export PROXMOX_VE_INSECURE="true"
export SSL_CERT_FILE=/tmp/pve-ca-bundle.crt
export PROXMOX_VE_API_TOKEN="root@pam!mytoken={pm_token}"
${lib.getExe deployment.run} | jq -r '.ipv4.value[0]'
export PROXMOX_VE_API_TOKEN="root@pam!template={template_token}"
${lib.getExe template-deployment.run} | jq -r '.id.value'
""").strip()
deploy = f"""
set -e
ssh -o BatchMode=yes -o StrictHostKeyChecking=no pve "true"
export PROXMOX_VE_INSECURE="true"
export SSL_CERT_FILE=/tmp/pve-ca-bundle.crt
export PROXMOX_VE_API_TOKEN="root@pam!vm={vm_token}"
export TF_VAR_template_id="{template_id}"
${lib.getExe vm-deployment.run} | jq -r '.ipv4.value[0]'
"""
with subtest("Run the deployment"):

View file

@ -0,0 +1,63 @@
# Setup for the template-upload half of the Proxmox check: evaluates the
# deployment data model into a `tf-proxmox-template` deployment whose `run`
# script uploads a NixOS image to Proxmox VE as a VM template.
{
config,
system,
# Pin dependency sources via npins unless the caller overrides them.
sources ? import ../../../../npins,
...
}:
let
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../../common/utils.nix { inherit sources; }) mkNixosConfiguration;
# Caller-supplied settings. NOTE(review): both `nodeName` (used as the SSH
# host) and `node-name` (the Proxmox node identifier) are expected — confirm
# the near-duplicate names are intentional.
inherit (config)
nodeName
targetSystem
httpBackend
node-name
imageDatastoreId
;
in
(pkgs.callPackage ../../../utils.nix { }).evalModel (
{ config, ... }:
{
imports = [ ../../common/model.nix ];
config = {
environments.default = environment: {
# The example environment needs one resource: a login shell for an
# "operator" user with wheel access.
resources."operator-environment".login-shell = {
wheel = true;
username = "operator";
};
# Map the environment's required resources onto a tf-proxmox-template
# deployment (declared in the common data model).
implementation =
{
required-resources,
...
}:
{
tf-proxmox-template = {
nixos-configuration = mkNixosConfiguration environment required-resources;
system = targetSystem;
ssh = {
host = nodeName;
};
inherit
node-name
httpBackend
imageDatastoreId
;
};
};
};
};
# Expose the evaluated default deployment (including its `run` script)
# as this module's `default` option.
options.default =
let
env = config.environments.default;
in
lib.mkOption {
type = env.resource-mapping.output-type;
default = env.deployment {
deployment-name = "default";
configuration = config."example-configuration";
};
};
}
)

View file

@ -1,16 +1,14 @@
{
config,
system,
sources ? import ../../../npins,
sources ? import ../../../../npins,
...
}@args:
let
# inherit (args) sources;
self = "deployment/check/data-model-tf-proxmox/data-model.nix";
inherit (sources) nixpkgs;
pkgs = import nixpkgs { inherit system; };
inherit (pkgs) lib;
inherit (pkgs.callPackage ../common/utils.nix { inherit sources; }) mkNixosConfiguration;
inherit (pkgs.callPackage ../../common/utils.nix { inherit sources; }) mkNixosConfiguration;
inherit (config)
nodeName
pathToRoot
@ -21,6 +19,7 @@ let
node-name
bridge
vlanId
templateId
imageDatastoreId
vmDatastoreId
cdDatastoreId
@ -30,10 +29,10 @@ let
ipv6Address
;
in
(pkgs.callPackage ../../utils.nix { }).evalModel (
(pkgs.callPackage ../../../utils.nix { }).evalModel (
{ config, ... }:
{
imports = [ ../common/model.nix ];
imports = [ ../../common/model.nix ];
config = {
environments.default = environment: {
resources."operator-environment".login-shell = {
@ -54,7 +53,7 @@ in
host = nodeName;
inherit key-file sshOpts;
};
module = self;
caller = "deployment/check/data-model-tf-proxmox/setups/vm.nix";
inherit
args
deployment-name
@ -62,6 +61,7 @@ in
node-name
bridge
vlanId
templateId
imageDatastoreId
vmDatastoreId
cdDatastoreId

View file

@ -284,8 +284,136 @@ let
};
});
};
tf-proxmox-template = mkOption {
description = ''
A Terraform deployment to upload a virtual machine template to ProxmoX VE.
Proxmox credentials should be set using [environment variables]
(https://registry.terraform.io/providers/bpg/proxmox/latest/docs#environment-variables-summary)
with role `PVEDatastoreAdmin`.
'';
type = submodule (tf-host: {
options = {
system = mkOption {
description = "The architecture of the system to deploy to.";
type = types.str;
};
inherit nixos-configuration;
ssh = host-ssh;
node-name = mkOption {
description = "the name of the ProxmoX node to use.";
type = types.str;
};
httpBackend = mkOption {
description = "environment variables to configure the TF HTTP back-end, see <https://developer.hashicorp.com/terraform/language/backend/http#configuration-variables>";
type = types.attrsOf (types.either types.str types.int);
};
imageDatastoreId = mkOption {
description = "ID of the datastore of the image.";
type = types.str;
default = "local";
};
run = mkOption {
type = types.package;
# error: The option `tf-deployment.tf-host.run' is read-only, but it's set multiple times.
# readOnly = true;
default =
let
inherit (tf-host.config)
system
ssh
httpBackend
node-name
imageDatastoreId
;
inherit (ssh)
host
;
# machine = import nixos_conf;
machine = import ./nixos.nix {
inherit sources system;
configuration = tf-host.config.nixos-configuration;
# configuration = { ... }: {
# imports = [
# tf-host.config.nixos-configuration
# ../infra/common/nixos/repart.nix
# ];
# };
};
# inherit (machine.config.boot.uki) name;
name = "monkey";
# # systemd-repart
# better for cross-compilation, worse for pre-/post-processing, doesn't support MBR: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
# raw = "${machine.config.system.build.image}/${name}.raw";
# disko
# worse for cross-compilation, better for pre-/post-processing, needs manual `imageSize`, random failures: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
raw = "${machine.config.system.build.diskoImages}/main.raw";
# # nixos-generators: note it can straight-up do qcow2 as well, if we settle for nixos-generators
# # `mount: /run/nixos-etc-metadata.J3iARWBtna: failed to setup loop device for /nix/store/14ka2bmx6lcnyr8ah2yl787sqcgxz5ni-etc-metadata.erofs.`
# # [`Error: Failed to parse os-release`](https://github.com/NixOS/nixpkgs/blob/5b1861820a3bc4ef2f60b0afcffb71ea43f5d000/pkgs/by-name/sw/switch-to-configuration-ng/src/src/main.rs#L151)
# raw = let
# # TODO parameterize things to let this flow into the terraform
# # btw qcow can be made by nixos-generators (qcow, qcow-efi) or by `image.repart`
# # wait, so i generate an image for the nixos config from the data model? how would i then propagate that to deploy?
# gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system formatConfig;
# inherit (sources) nixpkgs;
# configuration = tf-host.config.nixos-configuration;
# };
# in
# "${gen.config.system.build.${formatAttr}}/nixos${fileExtension}";
environment = {
inherit
host
;
node_name = node-name;
image_datastore_id = imageDatastoreId;
};
tf-env = pkgs.callPackage ./run/tf-env.nix {
inherit httpBackend;
tfPackage = pkgs.callPackage ./run/tf-proxmox-template/tf.nix { };
tfDirs = [
"deployment/run/tf-proxmox-template"
];
};
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-template.sh"
(withPackages [
pkgs.jq
pkgs.qemu
pkgs.nixos-generators
pkgs.httpie
(pkgs.callPackage ./run/tf-proxmox-vm/tf.nix { })
])
''
set -e
# nixos-generate gives the burden of building revisions, while systemd-repart handles partitioning ~~at the burden of version revisions~~
# .qcow2 is around half the size of .raw, on top of supporting backups - be it apparently at the cost of performance
qemu-img convert -f raw -O qcow2 -C "${raw}" /tmp/${name}.qcow2
ls -l ${raw} >&2
ls -l /tmp/${name}.qcow2 >&2
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
TF_VAR_image=/tmp/${name}.qcow2 \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox-template/run.sh
'';
};
};
});
};
tf-proxmox-vm = mkOption {
description = "A Terraform deployment by SSH to update a single existing NixOS host.";
description = ''
A Terraform deployment to provision and update a virtual machine on ProxmoX VE.
Proxmox credentials should be set using [environment variables]
(https://registry.terraform.io/providers/bpg/proxmox/latest/docs#environment-variables-summary)
with roles `PVEVMAdmin PVEDatastoreAdmin PVESDNUser`.
'';
type = submodule (tf-host: {
options = {
system = mkOption {
@ -334,6 +462,11 @@ let
type = types.str;
default = "local";
};
templateId = mkOption {
description = "ID of the template file from which to clone the VM.";
type = types.nullOr types.str;
example = "local:import/template.qcow2";
};
vmDatastoreId = mkOption {
description = "ID of the datastore of the VM.";
type = types.str;
@ -382,6 +515,7 @@ let
bridge
vlanId
imageDatastoreId
templateId
vmDatastoreId
cdDatastoreId
ipv4Gateway
@ -406,43 +540,6 @@ let
deployment-type
;
};
# machine = import nixos_conf;
machine = import ./nixos.nix {
inherit sources system;
configuration = tf-host.config.nixos-configuration;
# configuration = { ... }: {
# imports = [
# tf-host.config.nixos-configuration
# ../infra/common/nixos/repart.nix
# ];
# };
};
# inherit (machine.config.boot.uki) name;
name = "monkey";
# # systemd-repart
# better for cross-compilation, worse for pre-/post-processing, doesn't support MBR: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
# raw = "${machine.config.system.build.image}/${name}.raw";
# disko
# worse for cross-compilation, better for pre-/post-processing, needs manual `imageSize`, random failures: https://github.com/nix-community/disko/issues/550#issuecomment-2503736973
raw = "${machine.config.system.build.diskoImages}/main.raw";
# # nixos-generators: note it can straight-up do qcow2 as well, if we settle for nixos-generators
# # `mount: /run/nixos-etc-metadata.J3iARWBtna: failed to setup loop device for /nix/store/14ka2bmx6lcnyr8ah2yl787sqcgxz5ni-etc-metadata.erofs.`
# # [`Error: Failed to parse os-release`](https://github.com/NixOS/nixpkgs/blob/5b1861820a3bc4ef2f60b0afcffb71ea43f5d000/pkgs/by-name/sw/switch-to-configuration-ng/src/src/main.rs#L151)
# raw = let
# # TODO parameterize things to let this flow into the terraform
# # btw qcow can be made by nixos-generators (qcow, qcow-efi) or by `image.repart`
# # wait, so i generate an image for the nixos config from the data model? how would i then propagate that to deploy?
# gen = import "${pkgs.nixos-generators}/share/nixos-generator/nixos-generate.nix" {
# inherit system formatConfig;
# inherit (sources) nixpkgs;
# configuration = tf-host.config.nixos-configuration;
# };
# in
# "${gen.config.system.build.${formatAttr}}/nixos${fileExtension}";
environment = {
key_file = key-file;
ssh_opts = sshOpts;
@ -455,6 +552,7 @@ let
ssh_user = username;
vlan_id = vlanId;
image_datastore_id = imageDatastoreId;
template_id = templateId;
vm_datastore_id = vmDatastoreId;
cd_datastore_id = cdDatastoreId;
ipv4_gateway = ipv4Gateway;
@ -472,7 +570,7 @@ let
};
vm_name = "test14";
in
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox.sh"
lib.trace (lib.strings.toJSON environment) pkgs.writers.writeBashBin "deploy-tf-proxmox-vm.sh"
(withPackages [
pkgs.jq
pkgs.qemu
@ -489,20 +587,16 @@ let
# cp $tmpdir/${vm_name}_host_key.pub /mnt/etc/ssh/ssh_host_ed25519_key.pub
# chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
# nixos-generate gives the burden of building revisions, while systemd-repart handles partitioning ~~at the burden of version revisions~~
# .qcow2 is around half the size of .raw, on top of supporting backups - be it apparently at the cost of performance
qemu-img convert -f raw -O qcow2 -C "${raw}" /tmp/${name}.qcow2
ls -l ${raw} >&2
ls -l /tmp/${name}.qcow2 >&2
env ${toString (lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") environment)} \
env ${
toString (
lib.mapAttrsToList (k: v: "TF_VAR_${k}=\"${toBash v}\"") (
lib.filterAttrs (_: v: v != null) environment
)
)
} \
${toString (lib.mapAttrsToList (k: v: "${k}=\"${toBash v}\"") httpBackend)} \
TF_VAR_image=/tmp/${name}.qcow2 \
tf_env=${tf-env} bash ./deployment/run/tf-proxmox-vm/run.sh
'';
# # don't really wanna deal with having to do versioned updates for now
# qemu-img convert -f raw -O qcow2 -C "${machine.config.system.build.image}/${name}.raw" /tmp/${name}.qcow2
};
};
});

View file

@ -0,0 +1,63 @@
# Uploads a NixOS disk image to a Proxmox VE datastore so it can later be
# imported as a VM template. State lives in an HTTP backend whose address and
# credentials are supplied via TF_HTTP_* environment variables.
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "= 0.81.0"
}
}
# Configuration comes entirely from TF_HTTP_* environment variables.
backend "http" {
}
}
locals {
# Name the image gets inside the datastore; `category` keeps parallel jobs
# (e.g. test vs. prod) from clobbering each other's uploads.
dump_name = "qemu-nixos-fediversity-${var.category}.qcow2"
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
endpoint = "https://${var.host}:8006/"
# used for upload
ssh {
agent = true
username = "root"
}
}
# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
# NOTE(review): this data source is not referenced by any resource in this
# file — confirm it is still needed here.
data "external" "hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
# FIXME (un)stream
# FIXME handle known-hosts in TF state
# FIXME move to host
# FIXME switch to base image shared between jobs as upload seems a bottleneck? e.g. by:
# - recursive TF
# - hash in name over overwrite
# won't notice file changes: https://github.com/bpg/terraform-provider-proxmox/issues/677
resource "proxmox_virtual_environment_file" "upload" {
content_type = "import"
# https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
# PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
# that UI action also adds it in `/etc/pve/storage.cfg`
datastore_id = var.image_datastore_id
node_name = var.node_name
overwrite = true
timeout_upload = 500
source_file {
path = var.image
file_name = local.dump_name
# FIXME compute and pass hash (so identical builds don't trigger drift)
# checksum = "sha256"
}
}
# Datastore ID of the uploaded file (e.g. "local:import/..."); the VM
# deployment consumes this as its `template_id`.
output "id" {
value = proxmox_virtual_environment_file.upload.id
}
# File name of the upload within the datastore.
output "path" {
value = proxmox_virtual_environment_file.upload.source_file[0].file_name
}

View file

@ -0,0 +1,7 @@
#! /usr/bin/env bash
# Apply the tf-proxmox-template OpenTofu configuration and print its outputs
# as JSON on stdout; all apply progress goes to stderr so stdout stays
# machine-readable (callers pipe it into jq).
set -euo pipefail
# The caller must provide tf_env (path to the prepared Terraform environment,
# see tf-env.nix). Fail fast with a clear message instead of the bare
# `unbound variable` error `set -u` would otherwise produce at the `cd`.
: "${tf_env:?tf_env must be set to the path of the Terraform environment}"
cd "${tf_env}/deployment/run/tf-proxmox-template"
# -parallelism=1 keeps the single-upload apply deterministic and easy to read.
tofu apply --auto-approve -input=false -parallelism=1 >&2
tofu output -json

View file

@ -0,0 +1,56 @@
# Builds a pinned OpenTofu (1.9.0, patched for sandboxed `init`) bundled with
# the provider plugins needed by the template-upload deployment:
# external, null, and bpg/proxmox.
# FIXME: use overlays so this gets imported just once?
{
pkgs,
}:
# FIXME centralize overlays
# XXX using recent revision for https://github.com/NixOS/nixpkgs/pull/447849
let
sources = import ../../../npins;
# go_1_25 = pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/development/compilers/go/1.25.nix" { };
# buildGo125Module = pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/build-support/go/module.nix" {
# go = go_1_25;
# };
# go_1_25 = pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/development/compilers/go/1.25.nix" { inherit buildGo125Module; };
# Build a Terraform provider plugin, fetching its source from npins instead
# of the network so the build stays reproducible.
mkProvider =
args:
# (pkgs.terraform-providers.override { buildGoModule = pkgs.buildGo124Module; }).mkProvider (
# (pkgs.terraform-providers.override { buildGoModule = pkgs.buildGo124Module; }).mkProvider (
pkgs.terraform-providers.mkProvider (
{ mkProviderFetcher = { repo, ... }: sources.${repo}; } // args
);
in
(
# Take the opentofu package from nixpkgs-unstable, then pin its version
# and patch `init` so it can run inside the Nix sandbox.
(pkgs.callPackage "${sources.nixpkgs-unstable}/pkgs/by-name/op/opentofu/package.nix" { })
.overrideAttrs
(old: rec {
patches = (old.patches or [ ]) ++ [
# TF with back-end poses a problem for nix: initialization involves both
# mutation (nix: only inside build) and a network call (nix: not inside build)
../../check/data-model-tf/02-opentofu-sandboxed-init.patch
];
# versions > 1.9.0 need go 1.24+
version = "1.9.0";
src = pkgs.fetchFromGitHub {
owner = "opentofu";
repo = "opentofu";
tag = "v${version}";
hash = "sha256-e0ZzbQdex0DD7Bj9WpcVI5roh0cMbJuNr5nsSVaOSu4=";
};
vendorHash = "sha256-fMTbLSeW+pw6GK8/JLZzG2ER90ss2g1FSDX5+f292do=";
})
# Bundle the provider plugins so `tofu init` needs no registry access.
).withPlugins
(p: [
p.external
p.null
(mkProvider {
owner = "bpg";
repo = "terraform-provider-proxmox";
# 0.82+ need go 1.25
rev = "v0.81.0";
spdx = "MPL-2.0";
hash = null;
vendorHash = "sha256-cpei22LkKqohlE76CQcIL5d7p+BjNcD6UQ8dl0WXUOc=";
homepage = "https://registry.terraform.io/providers/bpg/proxmox";
provider-source-address = "registry.opentofu.org/bpg/proxmox";
})
])

View file

@ -0,0 +1,26 @@
# Input variables for the template-upload configuration (tf-proxmox-template).
# All are passed by the deployment runner as TF_VAR_* environment variables.
variable "host" {
description = "the host of the ProxmoX Virtual Environment."
type = string
}
variable "node_name" {
description = "the name of the ProxmoX node to use."
type = string
}
# Local path of the qcow2 image produced by the Nix build.
variable "image" {
description = "Back-up file to upload."
type = string
}
variable "image_datastore_id" {
description = "ID of the datastore of the image."
type = string
default = "local"
}
variable "category" {
type = string
description = "Category to be used in naming the base image."
default = "test"
}

View file

@ -9,10 +9,6 @@ terraform {
}
}
locals {
dump_name = "qemu-nixos-fediversity-${var.category}.qcow2"
}
# https://registry.terraform.io/providers/bpg/proxmox/latest/docs
provider "proxmox" {
endpoint = "https://${var.host}:8006/"
@ -36,38 +32,6 @@ data "external" "hash" {
program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}
# FIXME (un)stream
# FIXME handle known-hosts in TF state
# FIXME move to host
# FIXME switch to base image shared between jobs as upload seems a bottleneck? e.g. by:
# - recursive TF
# - hash in name over overwrite
# won't notice file changes: https://github.com/bpg/terraform-provider-proxmox/issues/677
resource "proxmox_virtual_environment_file" "upload" {
# # https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts
# timeouts {
# create = "60m"
# }
content_type = "import"
# https://192.168.51.81:8006/#v1:0:=storage%2Fnode051%2Flocal:4::=contentIso:::::
# PVE -> Datacenter -> Storage -> local -> Edit -> General -> Content -> check Import + Disk Images -> OK
# that UI action also adds it in `/etc/pve/storage.cfg`
datastore_id = var.image_datastore_id
node_name = var.node_name
overwrite = true
timeout_upload = 500
# timeout_upload = 1
source_file {
# path = "/tmp/proxmox-image/${local.dump_name}"
path = var.image
file_name = local.dump_name
# FIXME compute and pass hash (so identical builds don't trigger drift)
# checksum = "sha256"
}
}
resource "proxmox_virtual_environment_vm" "nix_vm" {
lifecycle {
# wait, would this not disseminate any changes to this property,
@ -110,7 +74,7 @@ resource "proxmox_virtual_environment_vm" "nix_vm" {
ssd = true
backup = false
cache = "none"
import_from = proxmox_virtual_environment_file.upload.id
import_from = var.template_id
}
efi_disk {

View file

@ -30,11 +30,6 @@ variable "ssh_opts" {
default = "[]"
}
variable "image" {
# description = ""
type = string
}
variable "bridge" {
description = "The name of the network bridge (defaults to vmbr0)."
type = string
@ -47,10 +42,9 @@ variable "vlan_id" {
default = 0
}
variable "image_datastore_id" {
description = "ID of the datastore of the image."
variable "template_id" {
description = "ID of the template file from which to clone the VM."
type = string
default = "local"
}
variable "vm_datastore_id" {
@ -89,14 +83,6 @@ variable "ipv6_address" {
default = ""
}
#########################################
variable "category" {
type = string
description = "Category to be used in naming the base image."
default = "test"
}
variable "description" {
type = string
default = ""