# hash of our code directory, used to trigger re-deploy
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../..)\\\"}\""]
}

# TF resource to build and deploy NixOS instances.
resource "terraform_data" "nixos" {
  # trigger rebuild/deploy if (FIXME?) any potentially used config/code changed,
  # preventing these (20+s, with the build being the bottleneck) when nothing changed.
  # terraform-nixos separates these to only deploy if the instantiation changed,
  # yet still builds even then - which may not be as bad when deploying on the remote.
  # having build/deploy in one resource reflects preferring to prevent no-op rebuilds
  # over preventing (with fewer false positives) no-op deployments,
  # as i could not find a way to prevent no-op rebuilds without merging them:
  # - generic resources cannot have outputs, while we want info from the instantiation (unless built on host?).
  # - `data` always runs, which is slow for deploy and especially build.
  triggers_replace = [
    data.external.hash.result,
    var.host,
    var.config_nix,
    var.config_tf,
  ]

  provisioner "local-exec" {
    # directory to run the script from. we use the TF project root dir,
    # expressed as a path relative to where TF is run from,
    # matching calling modules' expectations about config_nix locations.
    # note that absolute paths can cause false positives in triggers,
    # so they are generally discouraged in TF.
    working_dir = path.root

    environment = {
      system     = var.system
      username   = var.username
      host       = var.host
      config_nix = var.config_nix
      config_tf  = replace(jsonencode(var.config_tf), "\"", "\\\"")
    }

    # TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
    # TODO: build on target?
    command = "sh deploy.sh"
  }
}
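
# a possible refinement of the hash FIXME above (a sketch only, not wired in):
# hash just the directories that actually feed the build instead of the whole repo,
# so unrelated changes under ../../.. no longer trigger a rebuild/deploy.
# the directory names below are assumptions about the repo layout, not real paths.
# data "external" "hash_nix" {
#   program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../../nix)\\\"}\""]
# }
# data "external" "hash_deploy" {
#   program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ../../../deploy)\\\"}\""]
# }
# these results could then replace data.external.hash.result in triggers_replace,
# reducing false-positive rebuilds at the cost of maintaining the directory list.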