Compare commits

12 commits

Author SHA1 Message Date
ccf442b099
reproduce infinite recursion
the challenge here seems to stem from a mutual dependency between `imports` and `inputs`:

```
  at /nix/store/zid48nap5bh52n8d2gc5kbnzhxy3cm0k-source/deployment/data-model.nix:31:7:
    30|     staticModules = [
    31|       inputs.nixops4.modules.nixops4Deployment.default
      |       ^
    32|
… while calling anonymous lambda
  at /nix/store/hjb1rqv2mfs5ny47amj2gsc8xk05x5g6-source/lib/modules.nix:513:35:
   512|       context = name: ''while evaluating the module argument `${name}' in "${key}":'';
   513|       extraArgs = mapAttrs (name: _:
      |                                   ^
   514|         addErrorContext (context name)
… while evaluating the module argument `inputs' in "/nix/store/zid48nap5bh52n8d2gc5kbnzhxy3cm0k-source/deployment/data-model.nix":
… while evaluating the attribute 'inputs'
  at /nix/store/zid48nap5bh52n8d2gc5kbnzhxy3cm0k-source/deployment/check/data-model/deployment.nix:15:17:
    14|       specialArgs = {
    15|         inherit inputs;
      |                 ^
    16|       };
… from call site
  at /nix/store/zid48nap5bh52n8d2gc5kbnzhxy3cm0k-source/deployment/check/data-model/deployment.nix:15:17:
    14|       specialArgs = {
    15|         inherit inputs;
      |                 ^
    16|       };
```
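Below is a minimal sketch of that pattern, using hypothetical names (`eval`, `someModule`, `extraModule`) and a plain `lib.evalModules` call rather than the project's actual modules: the value supplied for the `inputs` module argument refers back to the result of the evaluation, while the module's `imports` needs `inputs` to be resolved first.

```nix
# Hypothetical minimal reproduction of the cycle described above;
# not the project's actual code.
let
  lib = import <nixpkgs/lib>;

  eval = lib.evalModules {
    specialArgs = {
      # `inputs` refers back to `eval.config`, which cannot be computed
      # without first resolving `imports`, which in turn needs `inputs`.
      inputs = {
        extraModule = eval.config.someModule;
      };
    };
    modules = [
      (
        { inputs, ... }:
        {
          options.someModule = lib.mkOption {
            type = lib.types.deferredModule;
            default = { };
          };
          imports = [ inputs.extraModule ];
        }
      )
    ];
  };
in
eval.config
```

Evaluating this would be expected to fail with `error: infinite recursion encountered`, analogous to the trace above.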
2025-08-01 20:38:43 +02:00
6635a11832
scaffold deployment/check/data-model from ./basic 2025-08-01 16:20:22 +02:00
9c219341b1 Merge pull request 'move nixops4Deployment class' (#6) from kiara/Fediversity:data-model-fix-root-class into deployment-data-model-with-tests
Reviewed-on: fricklerhandwerk/Fediversity#6
Reviewed-by: Valentin Gagarin <valentin.gagarin@tweag.io>
2025-07-31 18:17:56 +02:00
8e8787d662
move nixops4Deployment class 2025-07-31 17:13:24 +02:00
7ce3902851
put config stuff in an attrset 2025-07-31 17:08:40 +02:00
68b834b6d7
fix linter gripes 2025-07-31 17:03:43 +02:00
1063be8c16 add explanatory comment 2025-07-29 17:20:46 +02:00
35521fb40e implement and test data model for runtime environments 2025-07-29 17:07:33 +02:00
16d3c512e0 generalize function type 2025-07-29 17:06:32 +02:00
4509d277d3 move arguments from _module.args to specialArgs (#469)
Reviewed-on: Fediversity/Fediversity#469
Reviewed-by: Valentin Gagarin <valentin.gagarin@tweag.io>
Co-authored-by: Kiara Grouwstra <kiara@procolix.eu>
Co-committed-by: Kiara Grouwstra <kiara@procolix.eu>
2025-07-23 18:12:55 +02:00
e488230d7b updater: make npins command verbose (#477)
Reviewed-on: Fediversity/Fediversity#477
Co-authored-by: Kiara Grouwstra <kiara@procolix.eu>
Co-committed-by: Kiara Grouwstra <kiara@procolix.eu>
2025-07-19 13:00:33 +02:00
765183cd0d fix typo in users (#475)
Reviewed-on: Fediversity/Fediversity#475
Co-authored-by: Kiara Grouwstra <kiara@procolix.eu>
Co-committed-by: Kiara Grouwstra <kiara@procolix.eu>
2025-07-17 19:02:14 +02:00
15 changed files with 652 additions and 36 deletions

View file

@@ -56,3 +56,9 @@ jobs:
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-panel -L
check-deployment-model:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.deployment-model -L

View file

@@ -13,7 +13,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Update pins
run: nix-shell --run "npins update"
run: nix-shell --run "npins --verbose update"
- name: Create PR
uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
with:

View file

@@ -11,7 +11,8 @@ let
;
inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4;
inputs = (import-flake { src = ./.; }).inputs;
inherit (inputs) nixops4;
panel = import ./panel { inherit sources system; };
pre-commit-check =
(import "${git-hooks}/nix" {
@@ -78,6 +79,7 @@ in
# re-export inputs so they can be overridden granularly
# (they can't be accessed from the outside any other way)
inherit
inputs
sources
system
pkgs

View file

@@ -0,0 +1,200 @@
{
inputs,
lib,
config,
hostPkgs,
sources,
...
}:
let
inherit (builtins)
concatStringsSep
toJSON
;
inherit (lib)
types
fileset
mkOption
genAttrs
attrNames
optionalString
;
inherit (hostPkgs)
runCommandNoCC
writeText
system
;
forConcat = xs: f: concatStringsSep "\n" (map f xs);
## We will need to override some inputs by the empty flake, so we make one.
emptyFlake = runCommandNoCC "empty-flake" { } ''
mkdir $out
echo "{ outputs = { self }: {}; }" > $out/flake.nix
'';
in
{
_class = "nixosTest";
imports = [
./sharedOptions.nix
];
options = {
## FIXME: I wish I could just use `testScript` but with something like
## `mkOrder` to put this module's string before something else.
extraTestScript = mkOption { };
sourceFileset = mkOption {
## REVIEW: Upstream to nixpkgs?
type = types.mkOptionType {
name = "fileset";
description = "fileset";
descriptionClass = "noun";
check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
merge = (_: defs: fileset.unions (map (x: x.value) defs));
};
description = ''
A fileset that will be copied to the current working directory on the
deployer node. It should contain all the files necessary to run this
particular test, such as the NixOS modules needed to evaluate a
deployment.
'';
};
};
config = {
sourceFileset = fileset.unions [
# NOTE: not the flake itself; it will be overridden.
../../../mkFlake.nix
../../../flake.lock
../../../npins
./sharedOptions.nix
./targetNode.nix
./targetResource.nix
(config.pathToCwd + "/flake-under-test.nix")
];
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
nodes =
{
deployer = {
imports = [ ./deployerNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
};
}
//
(
if config.enableAcme then
{
acme = {
## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
## break things once we play with DNS?
imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
## We aren't testing ACME - we just want certificates.
systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
};
}
else
{ }
)
//
genAttrs config.targetMachines (_: {
imports = [ ./targetNode.nix ];
_module.args = { inherit inputs sources; };
enableAcme = config.enableAcme;
acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
});
testScript = ''
${forConcat (attrNames config.nodes) (n: ''
${n}.start()
'')}
${forConcat (attrNames config.nodes) (n: ''
${n}.wait_for_unit("multi-user.target")
'')}
## A subset of the repository that is necessary for this test. It will be
## copied inside the test. The smaller this set, the faster our CI, because we
## won't need to re-run when things change outside of it.
with subtest("Unpacking"):
deployer.succeed("cp -r --no-preserve=mode ${
fileset.toSource {
root = ../../..;
fileset = config.sourceFileset;
}
}/* .")
with subtest("Configure the network"):
${forConcat config.targetMachines (
tm:
let
targetNetworkJSON = writeText "target-network.json" (
toJSON config.nodes.${tm}.system.build.networkConfig
);
in
''
deployer.copy_from_host("${targetNetworkJSON}", "${config.pathFromRoot}/${tm}-network.json")
''
)}
with subtest("Configure the deployer key"):
deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
${forConcat config.targetMachines (tm: ''
${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
'')}
with subtest("Configure the target host key"):
${forConcat config.targetMachines (tm: ''
host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
'')}
## NOTE: This is super slow. It could probably be optimised in Nix, for
## instance by allowing the test to grab things directly from the host's store.
##
## NOTE: We use the repository as-is (cf `src` above), overriding only
## `flake.nix` by our `flake-under-test.nix`. We also override the flake
## lock file to use locally available inputs, as we cannot download them.
##
with subtest("Override the flake and its lock"):
deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
deployer.succeed("""
nix flake lock --extra-experimental-features 'flakes nix-command' \
--offline -v \
--override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
\
--override-input nixops4-nixos ${inputs.nixops4-nixos} \
--override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
--override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
--override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
--override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
--override-input nixops4-nixos/nixops4 ${
inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
} \
--override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
;
""")
${optionalString config.enableAcme ''
with subtest("Set up handmade DNS"):
deployer.succeed("echo '${config.nodes.acme.networking.primaryIPAddress}' > ${config.pathFromRoot}/acme_server_ip")
''}
${config.extraTestScript}
'';
};
}

View file

@@ -0,0 +1,8 @@
{
targetMachines = [
"hello"
"cowsay"
];
pathToRoot = ../../..;
pathFromRoot = ./.;
}

View file

@@ -0,0 +1,14 @@
{
runNixOSTest,
inputs,
sources,
}:
runNixOSTest {
imports = [
../common/nixosTest.nix
./nixosTest.nix
];
_module.args = { inherit inputs sources; };
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}

View file

@@ -0,0 +1,61 @@
{
inputs,
sources,
lib,
...
}:
let
inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
eval =
module:
(lib.evalModules {
specialArgs = {
inherit inputs;
};
modules = [
module
../../data-model.nix
];
}).config;
fediversity = eval (
{ ... }:
{
config = {
environments.single-nixos-vm =
{ ... }:
{
implementation = requests: {
input = requests;
output =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources = lib.genAttrs targetMachines (nodeName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
../../data-model.nix
../../function.nix
../common/targetResource.nix
];
_module.args = { inherit inputs sources; };
inherit nodeName pathToRoot pathFromRoot;
nixos.module =
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.${nodeName} ];
};
});
};
};
};
};
}
);
in
fediversity.environments.single-nixos-vm.deployment {
enable = true;
}

View file

@@ -0,0 +1,22 @@
{
inputs = {
nixops4.follows = "nixops4-nixos/nixops4";
nixops4-nixos.url = "github:nixops4/nixops4-nixos";
};
outputs =
inputs:
import ./mkFlake.nix inputs (
{ inputs, sources, ... }:
{
imports = [
inputs.nixops4.modules.flake.default
];
nixops4Deployments.check-deployment-model = {
imports = [ ./deployment/check/data-model/deployment.nix ];
_module.args = { inherit inputs sources; };
};
}
);
}

View file

@@ -0,0 +1,50 @@
{ inputs, lib, ... }:
{
_class = "nixosTest";
name = "deployment-model";
sourceFileset = lib.fileset.unions [
../../data-model.nix
../../function.nix
./constants.nix
./deployment.nix
];
nodes.deployer =
{ pkgs, ... }:
{
environment.systemPackages = [
inputs.nixops4.packages.${pkgs.system}.default
];
# FIXME: sad times
system.extraDependencies = with pkgs; [
jq
jq.inputDerivation
];
system.extraDependenciesFromModule =
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
hello
cowsay
];
};
};
extraTestScript = ''
with subtest("Check the status before deployment"):
hello.fail("hello 1>&2")
cowsay.fail("cowsay 1>&2")
with subtest("Run the deployment"):
deployer.succeed("nixops4 apply check-deployment-model --show-trace --no-interactive 1>&2")
with subtest("Check the deployment"):
hello.succeed("hello 1>&2")
cowsay.succeed("cowsay hi 1>&2")
'';
}

View file

@@ -1,7 +1,7 @@
let
inherit (import ../default.nix { }) pkgs inputs;
inherit (pkgs) lib;
inherit (lib) mkOption;
inherit (lib) mkOption types;
eval =
module:
(lib.evalModules {
@@ -13,17 +13,83 @@ let
./data-model.nix
];
}).config;
nixops4Deployment = inputs.nixops4.modules.nixops4Deployment.default;
inherit (inputs.nixops4.lib) mkDeployment;
in
{
_class = "nix-unit";
test-eval = {
/**
This tests a very simple arrangement that features all the ingredients of the Fediversity business logic:
application, resource, environment, and deployment, wiring them all up in one end-to-end exercise.
- The dummy resource is a login shell made available for some user.
- The dummy application is `hello` that requires a shell to be deployed.
- The dummy environment is a single NixOS VM that hosts one login shell, for the operator.
- The dummy configuration enables the `hello` application.
This will produce a NixOps4 deployment for a NixOS VM with a login shell for the operator and `hello` available.
*/
expr =
let
fediversity = eval (
{ config, ... }:
{
config = {
resources.login-shell = {
description = "The operator needs to be able to log into the shell";
request =
{ ... }:
{
_class = "fediversity-resource-request";
options = {
wheel = mkOption {
description = "Whether the login user needs root permissions";
type = types.bool;
default = false;
};
packages = mkOption {
description = "Packages that need to be available in the user environment";
type = with types; attrsOf package;
};
};
};
policy =
{ config, ... }:
{
_class = "fediversity-resource-policy";
options = {
username = mkOption {
description = "Username for the operator";
type = types.str; # TODO: use the proper constraints from NixOS
};
wheel = mkOption {
description = "Whether to allow login with root permissions";
type = types.bool;
default = false;
};
};
config = {
resource-type = types.raw; # TODO: splice out the user type from NixOS
apply =
requests:
let
# Filter out requests that need wheel if policy doesn't allow it
validRequests = lib.filterAttrs (
_name: req: !req.login-shell.wheel || config.wheel
) requests.resources;
in
lib.optionalAttrs (validRequests != { }) {
${config.username} = {
isNormalUser = true;
packages =
with lib;
attrValues (concatMapAttrs (_name: request: request.login-shell.packages) validRequests);
extraGroups = lib.optional config.wheel "wheel";
};
};
};
};
};
applications.hello =
{ ... }:
{
@@ -31,15 +97,42 @@ in
module =
{ ... }:
{
options = {
enable = lib.mkEnableOption "Hello in the shell";
options.enable = lib.mkEnableOption "Hello in the shell";
};
implementation = cfg: {
input = cfg;
output = lib.optionalAttrs cfg.enable {
resources.hello.login-shell.packages.hello = pkgs.hello;
};
};
};
environments.single-nixos-vm =
{ config, ... }:
{
resources.operator-environment.login-shell.username = "operator";
implementation = requests: {
input = requests;
output =
{ providers, ... }:
{
providers = {
inherit (inputs.nixops4.modules.nixops4Provider) local;
};
resources.the-machine = {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
];
nixos.module =
{ ... }:
{
users.users = config.resources.shell.login-shell.apply (
lib.filterAttrs (_name: value: value ? login-shell) requests
);
};
};
};
};
implementation =
cfg:
lib.optionalAttrs cfg.enable {
dummy.login-shell.packages.hello = pkgs.hello;
};
};
};
};
options = {
@@ -51,20 +144,64 @@ in
applications.hello.enable = true;
};
};
example-deployment = mkOption {
type = types.submodule nixops4Deployment;
readOnly = true;
default = config.environments.single-nixos-vm.deployment config.example-configuration;
};
};
}
);
resources = fediversity.applications.hello.resources fediversity.example-configuration.applications.hello;
hello-shell = resources.resources.hello.login-shell;
environment = fediversity.environments.single-nixos-vm.resources.operator-environment.login-shell;
result = mkDeployment {
modules = [
(fediversity.environments.single-nixos-vm.deployment fediversity.example-configuration)
];
};
in
{
inherit (fediversity)
example-configuration
;
number-of-resources = with lib; length (attrNames fediversity.resources);
inherit (fediversity) example-configuration;
hello-package-exists = hello-shell.packages ? hello;
wheel-required = hello-shell.wheel;
wheel-allowed = environment.wheel;
operator-shell =
let
operator = (environment.apply resources).operator;
in
{
inherit (operator) isNormalUser;
packages = map (p: "${p.pname}") operator.packages;
extraGroups = operator.extraGroups;
};
deployment = {
inherit (result) _type;
deploymentFunction = lib.isFunction result.deploymentFunction;
getProviders = lib.isFunction result.getProviders;
};
};
expected = {
number-of-resources = 1;
example-configuration = {
enable = true;
applications.hello.enable = true;
};
hello-package-exists = true;
wheel-required = false;
wheel-allowed = false;
operator-shell = {
isNormalUser = true;
packages = [ "hello" ];
extraGroups = [ ];
};
deployment = {
_type = "nixops4Deployment";
deploymentFunction = true;
getProviders = true;
};
};
};
}

View file

@@ -1,6 +1,7 @@
{
lib,
config,
inputs,
...
}:
let
@@ -15,19 +16,73 @@
;
functionType = import ./function.nix;
application-resources = {
application-resources = submodule {
options.resources = mkOption {
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
attrTag (
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.request; }) config.resources
)
);
};
};
nixops4Deployment = types.deferredModuleWith {
staticModules = [
inputs.nixops4.modules.nixops4Deployment.default
{
_class = "nixops4Deployment";
_module.args = {
resourceProviderSystem = builtins.currentSystem;
resources = { };
};
}
];
};
in
{
_class = "nixops4Deployment";
options = {
resources = mkOption {
description = "Collection of deployment resources that can be required by applications and policed by hosting providers";
type = attrsOf (
submodule (
{ ... }:
{
_class = "fediversity-resource";
options = {
description = mkOption {
description = "Description of the resource to help application module authors and hosting providers to work with it";
type = types.str;
};
request = mkOption {
description = "Options for declaring resource requirements by an application, a description of how the resource is consumed or accessed";
type = deferredModuleWith { staticModules = [ { _class = "fediversity-resource-request"; } ]; };
};
policy = mkOption {
description = "Options for configuring the resource policy for the hosting provider, a description of how the resource is made available";
type = deferredModuleWith {
staticModules = [
(policy: {
_class = "fediversity-resource-policy";
options.resource-type = mkOption {
description = "The type of resource this policy configures";
type = types.optionType;
};
# TODO(@fricklerhandwerk): we may want to make the function type explicit here: `request -> resource-type`
# and then also rename this to be consistent with the application's resource mapping
options.apply = mkOption {
description = "Apply the policy to a request";
type = functionTo policy.config.resource-type;
};
})
];
};
};
};
}
)
);
};
applications = mkOption {
description = "Collection of Fediversity applications";
type = attrsOf (
@@ -52,12 +107,13 @@ in
readOnly = true;
default = input: (application.config.implementation input).output;
};
# TODO(@fricklerhandwerk): this needs a better name, it's just the type
config-mapping = mkOption {
description = "Function type for the mapping from application configuration to required resources";
type = submodule functionType;
readOnly = true;
default = {
input-type = application.config.module;
input-type = submodule application.config.module;
output-type = application-resources;
};
};
@@ -65,6 +121,60 @@ in
})
);
};
environments = mkOption {
description = "Run-time environments for Fediversity applications to be deployed to";
type = attrsOf (
submodule (environment: {
_class = "fediversity-environment";
options = {
resources = mkOption {
description = ''
Resources made available by the hosting provider, and their policies.
Setting this is optional, but provides a place to declare that information for programmatic use in the resource mapping.
'';
# TODO: maybe transpose, and group the resources by type instead
type = attrsOf (
attrTag (
lib.mapAttrs (_name: resource: mkOption { type = submodule resource.policy; }) config.resources
)
);
};
implementation = mkOption {
description = "Mapping of resources required by applications to available resources; the result can be deployed";
type = environment.config.resource-mapping.function-type;
};
resource-mapping = mkOption {
description = "Function type for the mapping from resources to a (NixOps4) deployment";
type = submodule functionType;
readOnly = true;
default = {
input-type = application-resources;
output-type = nixops4Deployment;
};
};
# TODO(@fricklerhandwerk): maybe this should be a separate thing such as `fediversity-setup`,
# which makes explicit which applications and environments are available.
# then the deployments can simply be the result of the function application baked into this module.
deployment = mkOption {
description = "Generate a deployment from a configuration, by applying an environment's resource policies to the applications' resource mappings";
type = functionTo (environment.config.resource-mapping.output-type);
readOnly = true;
default =
cfg:
# TODO: check cfg.enable.true
let
required-resources = lib.mapAttrs (
name: application-settings: config.applications.${name}.resources application-settings
) cfg.applications;
in
(environment.config.implementation required-resources).output;
};
};
})
);
};
configuration = mkOption {
description = "Configuration type declaring options to be set by operators";
type = optionType;

View file

@@ -21,6 +21,11 @@
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
deployment-model = import ./check/data-model {
inherit (pkgs.testers) runNixOSTest;
inherit inputs sources;
};
};
};
}

View file

@@ -5,7 +5,6 @@
let
inherit (lib) mkOption types;
inherit (types)
deferredModule
submodule
functionTo
optionType
@@ -14,10 +13,10 @@ in
{
options = {
input-type = mkOption {
type = deferredModule;
type = optionType;
};
output-type = mkOption {
type = deferredModule;
type = optionType;
};
function-type = mkOption {
type = optionType;
@@ -25,10 +24,10 @@ in
default = functionTo (submodule {
options = {
input = mkOption {
type = submodule config.input-type;
type = config.input-type;
};
output = mkOption {
type = submodule config.output-type;
type = config.output-type;
};
};
});

View file

@@ -6,7 +6,7 @@
_class = "nixos";
users.users = {
root.openssh.authorizedKeys.keys = config.user.users.procolix.openssh.authorizedKeys.keys;
root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = {
isNormalUser = true;

View file

@@ -23,21 +23,17 @@ let
makeResourceModule =
{ vmName, isTestVm }:
{
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch flake-parts and have our own data model for how the project is organised internally
_module.args = {
inherit
inputs
keys
secrets
;
};
nixos.module.imports = [
./common/proxmox-qemu-vm.nix
];
nixos.specialArgs = {
inherit sources;
inherit
sources
inputs
keys
secrets
;
};
imports =
@@ -79,7 +75,13 @@ let
# TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs`
# once we're sure it's sane. leaving it here for better control during refactoring.
specialArgs = {
inherit sources;
inherit
sources
inputs
keys
secrets
;
};
});
};