forked from fediversity/fediversity
Compare commits: 93 commits, e916c606d1 ... bf4df5500a
166 changed files with 3453 additions and 922 deletions
.forgejo/workflows/cd.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
name: deploy-infra

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      - main

jobs:
  deploy:
    runs-on: native
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up SSH key for age secrets and SSH
        run: |
          env
          mkdir -p ~/.ssh
          echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
          chmod 600 ~/.ssh/id_ed25519

      - name: Deploy
        run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'
@@ -15,20 +15,76 @@ jobs:
      - uses: actions/checkout@v4
      - run: nix-build -A tests

  check-data-model:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'

  check-mastodon:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.test-mastodon-service -L

  check-peertube:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: cd services && nix-build -A tests.peertube
      - run: nix build .#checks.x86_64-linux.test-peertube-service -L

  check-panel:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: cd panel && nix-build -A tests
      - run: nix-build -A tests.panel

  check-deployment-basic:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.deployment-basic -L

  check-deployment-cli:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.deployment-cli -L

  check-deployment-panel:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: nix build .#checks.x86_64-linux.deployment-panel -L

  ## NOTE: NixOps4 does not provide a good “dry run” mode, so we instead check
  ## proxies for resources, namely whether their `.#vmOptions.<machine>` and
  ## `.#nixosConfigurations.<machine>` outputs evaluate and build correctly, and
  ## whether we can dry run `infra/proxmox-*.sh` on them. This will not catch
  ## everything, and in particular not issues in how NixOps4 wires up the
  ## resources, but that is still something.
  check-resources:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: |
          set -euC
          echo ==================== [ VM Options ] ====================
          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).vmOptions)')
          for machine in $machines; do
            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
            nix build .#checks.x86_64-linux.vmOptions-$machine
          done
          echo
          echo ==================== [ NixOS Configurations ] ====================
          machines=$(nix eval --impure --raw --expr 'with builtins; toString (attrNames (getFlake (toString ./.)).nixosConfigurations)')
          for machine in $machines; do
            echo ~~~~~~~~~~~~~~~~~~~~~: $machine :~~~~~~~~~~~~~~~~~~~~~
            nix build .#checks.x86_64-linux.nixosConfigurations-$machine
          done

  check-launch:
    runs-on: native
    steps:
      - uses: actions/checkout@v4
      - run: cd launch && nix-build -A tests
.forgejo/workflows/update.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
name: update-dependencies

on:
  workflow_dispatch: # allows manual triggering
  # FIXME: re-enable when manual run works
  # schedule:
  #   - cron: '0 0 1 * *' # monthly

jobs:
  lockfile:
    runs-on: native
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Update pins
        run: nix-shell --run "npins --verbose update"
      - name: Create PR
        uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
        with:
          remote-instance-api-version: v1
          token: "${{ secrets.DEPLOY_KEY }}"
          branch: npins-update
          commit-message: "npins: update sources"
          title: "npins: update sources"
.gitignore (vendored, 6 changed lines)
@@ -1,3 +1,9 @@
.npins.json
.terraform/
.terraform.lock.hcl
.terraform.tfstate.lock.info
terraform.tfstate*
.auto.tfvars.json
.DS_Store
.idea
*.log
README.md (33 changed lines)
@@ -14,6 +14,12 @@ There already exist solutions for self-hosting, but they're not suitable for what
The ones we're aware of require substantial technical knowledge and time commitment by operators, especially for scaling to thousands of users.
Not everyone has the expertise and time to run their own server.

## Interactions

To reach these goals, we aim to implement the following interactions between [actors](#actors) (depicted with rounded corners) and system components (see the [glossary](#glossary), depicted with rectangles).

## Actors

- Fediversity project team

@@ -57,11 +63,11 @@ Not everyone has the expertise and time to run their own server.

- [Fediverse](https://en.wikipedia.org/wiki/Fediverse)

  A collection of social networking applications that can communicate with each other using a common protocol.

- Application

  User-facing software (e.g. from Fediverse) run by the hosting provider for an operator.

- Configuration

@@ -73,11 +79,11 @@ Not everyone has the expertise and time to run their own server.

  Make a resource, such as a virtual machine, available for use.

  > Example: We use [Proxmox](https://www.proxmox.com) to provision VMs for applications run by operators.

- Deploy

  Put software, such as applications, onto computers.
  The software includes technical configuration that links software components.
  Most user-facing configuration remains untouched by the deployment process.

@@ -85,7 +91,7 @@ Not everyone has the expertise and time to run their own server.

- Migrate

  Move service configurations and deployment state, including user data, from one hosting provider to another.

- [NixOps4](https://github.com/nixops4/nixops4)

@@ -103,6 +109,18 @@ Not everyone has the expertise and time to run their own server.

  > Example: We need a resource provider for obtaining deployment secrets from a database.

- Runtime backend

  A type of digital environment one can run operating systems such as NixOS on, e.g. bare-metal, a hypervisor, or a container runtime.

- Runtime environment

  The thing a deployment runs on, an interface against which the deployment is working. See runtime backend.

- Runtime config

  Configuration logic specific to a runtime backend, e.g. how to deploy, how to access object storage.

## Development

All the code made for this project is freely licenced under [EUPL](https://en.m.wikipedia.org/wiki/European_Union_Public_Licence).

@@ -136,6 +154,3 @@ details as to what they are for. As an overview:

- [`services/`](./services) contains our effort to make Fediverse applications
  work seamlessly together in our specific setting.
default.nix (29 changed lines)
@@ -10,6 +10,9 @@ let
    gitignore
    ;
  inherit (pkgs) lib;
  inherit (import sources.flake-inputs) import-flake;
  inherit ((import-flake { src = ./.; }).inputs) nixops4;
  panel = import ./panel { inherit sources system; };
  pre-commit-check =
    (import "${git-hooks}/nix" {
      inherit nixpkgs system;

@@ -24,6 +27,7 @@ let
  ## Add a directory here if pre-commit hooks shouldn't apply to it.
  optout = [
    "npins"
    "**/.terraform"
  ];
  excludes = map (dir: "^${dir}/") optout;
  addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });

@@ -41,10 +45,35 @@ in
  shell = pkgs.mkShellNoCC {
    inherit (pre-commit-check) shellHook;
    buildInputs = pre-commit-check.enabledPackages;
    packages =
      let
        test-loop = pkgs.writeShellApplication {
          name = "test-loop";
          runtimeInputs = [
            pkgs.watchexec
            pkgs.nix-unit
          ];
          text = ''
            watchexec -w ${builtins.toString ./.} -- nix-unit ${builtins.toString ./deployment/data-model-test.nix} "$@"
          '';
        };
      in
      [
        pkgs.npins
        pkgs.nil
        (pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
        pkgs.openssh
        pkgs.httpie
        pkgs.jq
        pkgs.nix-unit
        test-loop
        nixops4.packages.${system}.default
      ];
  };

  tests = {
    inherit pre-commit-check;
    panel = panel.tests;
  };

  # re-export inputs so they can be overridden granularly
@@ -1,6 +1,123 @@
# Deployment

This directory contains work to generate a full Fediversity deployment from a minimal configuration.
This is different from [`../services/`](../services) that focuses on one machine, providing a polished and unified interface to different Fediverse services.

## Data model

The core piece of the project is the [Fediversity data model](./data-model.nix), which describes all entities and their interactions.

What can be done with it is exemplified in the [evaluation tests](./data-model-test.nix).
Run `test-loop` in the development environment when hacking on the data model or adding tests.
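For example, from the repository root (a sketch; `test-loop` is provided by the dev shell defined in `default.nix`, wrapping `watchexec` and `nix-unit`):

``` console
$ nix-shell --run test-loop
```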

## Checks

There are three levels of deployment checks: `basic`, `cli`, `panel`.
They can be found in subdirectories of [`check/`](./check).
They can be run as part of `nix flake check` or individually as:

``` console
$ nix build .#checks.<system>.deployment-<name> -vL
```
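
For instance, the basic check on an x86_64-linux machine (the same attribute the CI builds in its `check-deployment-basic` job):

``` console
$ nix build .#checks.x86_64-linux.deployment-basic -vL
```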

Since `nixops4 apply` operates on a flake, the tests take this repository's flake as a template.
This is also why there are some dummy files that will be overwritten inside the test.
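Concretely, the test driver (see `deployment/check/common/nixosTest.nix` below) copies the check's `flake-under-test.nix` over `flake.nix` inside the sandbox and re-locks it offline against store paths baked into the VM; roughly (a sketch, with placeholders standing in for the store paths the test passes):

``` console
$ cp deployment/check/basic/flake-under-test.nix flake.nix
$ nix flake lock --offline --override-input nixops4 <flake-in-a-bottle> \
    --override-input nixops4-nixos <store-path> ...
```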

### Basic deployment check

The basic deployment check is here as a building block and sanity check.
It does not actually use any of the code in this directory, but checks that our test strategy is sound and that basic NixOps4 functionality is present.

It is a NixOS test featuring one deployer machine and two target machines.
The deployment simply adds `pkgs.hello` to one and `pkgs.cowsay` to the other.
It is heavily inspired by [a similar test in nixops4-nixos].

[a similar test in nixops4-nixos]: https://github.com/nixops4/nixops4-nixos/blob/main/test/default/nixosTest.nix

This test involves three nodes:

- `deployer` is the node that will perform the deployment using `nixops4 apply`.
  Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.

- “target machines” are two eponymous nodes on which the packages `hello` and `cowsay` will be deployed.
  They start with a minimal configuration.

``` mermaid
flowchart LR
    deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]

    subgraph target_machines["target machines"]
        direction TB
        hello
        cowsay
    end

    deployer -->|deploys| target_machines
```

### Service deployment check using `nixops4 apply`

This check omits the panel by running a direct invocation of NixOps4.
It deploys some services and checks that they are indeed on the target machines, then cleans them up and checks whether that works, too.
It builds upon the basic deployment check.

This test involves seven nodes:

- `deployer` is the node that will perform the deployment using `nixops4 apply`.
  Because the test runs in a sandboxed environment, `deployer` will not have access to the internet, and therefore it must already have all store paths needed for the target nodes.

- “target machines” are four nodes, `garage`, `mastodon`, `peertube`, and `pixelfed`, on which the services will be deployed.
  They start with a minimal configuration.

- `acme` is a node that runs [Pebble], a miniature ACME server to deliver the certificates that the services expect.

- [WIP] `client` is a node that runs a browser controlled by some Selenium scripts in order to check that the services are indeed running and are accessible.

[Pebble]: https://github.com/letsencrypt/pebble

``` mermaid
flowchart LR

    classDef invisible fill:none,stroke:none

    subgraph left [" "]
        direction TB

        deployer["deployer<br><font size='1'>has store paths<br>runs nixops4</font>"]
        client["client<br><font size='1'>Selenium scripts</font>"]
    end

    subgraph middle [" "]
        subgraph target_machines["target machines"]
            direction TB

            garage
            mastodon
            peertube
            pixelfed
        end
    end

    subgraph right [" "]
        direction TB

        acme["acme<br><font size='1'>runs Pebble</font>"]
    end

    left ~~~ middle ~~~ right
    class left,middle,right invisible

    deployer -->|deploys| target_machines

    client -->|tests| mastodon
    client -->|tests| peertube
    client -->|tests| pixelfed

    target_machines -->|get certs| acme
```

### Service deployment check from the FediPanel

This is a full deployment check running the [FediPanel](../panel) on the deployer machine, deploying some services through it, checking that they are indeed on the target machines, then cleaning them up and checking whether that works, too.

It builds upon the basic and CLI deployment checks, the only difference being that `deployer` runs NixOps4 only indirectly via the panel, and the `client` node is the one that triggers the deployment via a browser, the way a human would.
(deleted file)
@@ -1,9 +0,0 @@
# Basic deployment test

Basic deployment test with one deployer machine, one target machine, and a
simple target application, namely cowsay. The goal is to check that basic
functionalities are here.

It is heavily inspired by a similar test in nixops4-nixos:

https://github.com/nixops4/nixops4-nixos/blob/main/test/default/nixosTest.nix
deployment/check/basic/constants.nix (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  targetMachines = [
    "hello"
    "cowsay"
  ];
  pathToRoot = ../../..;
  pathFromRoot = ./.;
}
deployment/check/basic/default.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
{
  runNixOSTest,
  inputs,
  sources,
}:

runNixOSTest {
  imports = [
    ../common/nixosTest.nix
    ./nixosTest.nix
  ];
  _module.args = { inherit inputs sources; };
  inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
}
(deleted file)
@@ -1 +0,0 @@
## This file is just a placeholder. It is overwritten by the test.
deployment/check/basic/deployment.nix (modified)
@@ -1,32 +1,36 @@
{
  inputs,
  sources,
  lib,
  providers,
  ...
}:

let
  inherit (import ./constants.nix) targetMachines pathToRoot pathFromRoot;
in

{
  providers = {
    inherit (inputs.nixops4.modules.nixops4Provider) local;
  };

  resources = lib.genAttrs targetMachines (nodeName: {
    type = providers.local.exec;
    imports = [
      inputs.nixops4-nixos.modules.nixops4Resource.nixos
      ../common/targetResource.nix
    ];

    _module.args = { inherit inputs sources; };

    inherit nodeName pathToRoot pathFromRoot;

    nixos.module =
      { pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.${nodeName} ];
      };
  });
}
(deleted file)
@@ -1,21 +0,0 @@
{ inputs, ... }:

{
  nixops4Deployments.check-deployment-basic =
    { ... }:
    {
      imports = [
        ./deployment.nix
      ];
      _module.args.inputs = inputs;
    };

  perSystem =
    { inputs', pkgs, ... }:
    {
      checks.deployment-basic = pkgs.callPackage ./nixosTest.nix {
        nixops4-flake-in-a-bottle = inputs'.nixops4.packages.flake-in-a-bottle;
        inherit inputs;
      };
    };
}
deployment/check/basic/flake-under-test.nix (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  inputs = {
    nixops4.follows = "nixops4-nixos/nixops4";
    nixops4-nixos.url = "github:nixops4/nixops4-nixos";
  };

  outputs =
    inputs:
    import ./mkFlake.nix inputs (
      { inputs, sources, ... }:
      {
        imports = [
          inputs.nixops4.modules.flake.default
        ];

        nixops4Deployments.check-deployment-basic = {
          imports = [ ./deployment/check/basic/deployment.nix ];
          _module.args = { inherit inputs sources; };
        };
      }
    );
}
(deleted file)
@@ -1,35 +0,0 @@
{
  lib,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    (modulesPath + "/../lib/testing/nixos-test-base.nix")
  ];

  ## Test framework disables switching by default. That might be OK by itself,
  ## but we also use this config for getting the dependencies in
  ## `deployer.system.extraDependencies`.
  system.switch.enable = true;

  nix = {
    ## Not used; save a large copy operation
    channel.enable = false;
    registry = lib.mkForce { };
  };

  services.openssh = {
    enable = true;
    settings.PermitRootLogin = "yes";
  };

  networking.firewall.allowedTCPPorts = [ 22 ];

  users.users.root.openssh.authorizedKeys.keyFiles = [ ./deployer.pub ];

  ## Test VMs don't have a bootloader by default.
  boot.loader.grub.enable = false;
}
deployment/check/basic/nixosTest.nix (rewritten)
@@ -1,161 +1,48 @@
{ inputs, lib, ... }:

{
  _class = "nixosTest";

  name = "deployment-basic";

  sourceFileset = lib.fileset.unions [
    ./constants.nix
    ./deployment.nix
  ];

  nodes.deployer =
    { pkgs, ... }:
    {
      environment.systemPackages = [
        inputs.nixops4.packages.${pkgs.system}.default
      ];

      # FIXME: sad times
      system.extraDependencies = with pkgs; [
        jq
        jq.inputDerivation
      ];

      system.extraDependenciesFromModule =
        { pkgs, ... }:
        {
          environment.systemPackages = with pkgs; [
            hello
            cowsay
          ];
        };
    };

  extraTestScript = ''
    with subtest("Check the status before deployment"):
        hello.fail("hello 1>&2")
        cowsay.fail("cowsay 1>&2")

    with subtest("Run the deployment"):
        deployer.succeed("nixops4 apply check-deployment-basic --show-trace --no-interactive 1>&2")

    with subtest("Check the deployment"):
        hello.succeed("hello 1>&2")
        cowsay.succeed("cowsay hi 1>&2")
  '';
}
(deleted file)
@@ -1 +0,0 @@
{"comment": "This file is just a placeholder. It is overwritten by the test."}

(deleted file)
@@ -1 +0,0 @@
## This file is just a placeholder. It is overwritten by the test.
deployment/check/cli/constants.nix (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  targetMachines = [
    "garage"
    "mastodon"
    "peertube"
    "pixelfed"
  ];
  pathToRoot = ../../..;
  pathFromRoot = ./.;
  enableAcme = true;
}
deployment/check/cli/default.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
{
  runNixOSTest,
  inputs,
  sources,
}:

runNixOSTest {
  imports = [
    ../common/nixosTest.nix
    ./nixosTest.nix
  ];
  _module.args = { inherit inputs sources; };
  inherit (import ./constants.nix)
    targetMachines
    pathToRoot
    pathFromRoot
    enableAcme
    ;
}
deployment/check/cli/deployments.nix (new file, 59 lines)
@@ -0,0 +1,59 @@
{
  inputs,
  sources,
  lib,
}:

let
  inherit (builtins) fromJSON readFile listToAttrs;
  inherit (import ./constants.nix)
    targetMachines
    pathToRoot
    pathFromRoot
    enableAcme
    ;

  makeTargetResource = nodeName: {
    imports = [ ../common/targetResource.nix ];
    _module.args = { inherit inputs sources; };
    inherit
      nodeName
      pathToRoot
      pathFromRoot
      enableAcme
      ;
  };

  ## The deployment function - what we are here to test!
  ##
  ## TODO: Modularise `deployment/default.nix` to get rid of the nested
  ## function calls.
  makeTestDeployment =
    args:
    (import ../..)
      {
        inherit lib;
        inherit (inputs) nixops4 nixops4-nixos;
        fediversity = import ../../../services/fediversity;
      }
      (listToAttrs (
        map (nodeName: {
          name = "${nodeName}ConfigurationResource";
          value = makeTargetResource nodeName;
        }) targetMachines
      ))
      (fromJSON (readFile ../../configuration.sample.json) // args);

in
{
  check-deployment-cli-nothing = makeTestDeployment { };

  check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
    mastodon.enable = true;
    pixelfed.enable = true;
  };

  check-deployment-cli-peertube = makeTestDeployment {
    peertube.enable = true;
  };
}
deployment/check/cli/flake-under-test.nix (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  inputs = {
    nixops4.follows = "nixops4-nixos/nixops4";
    nixops4-nixos.url = "github:nixops4/nixops4-nixos";
  };

  outputs =
    inputs:
    import ./mkFlake.nix inputs (
      {
        inputs,
        sources,
        lib,
        ...
      }:
      {
        imports = [
          inputs.nixops4.modules.flake.default
        ];

        nixops4Deployments = import ./deployment/check/cli/deployments.nix {
          inherit inputs sources lib;
        };
      }
    );
}
deployment/check/cli/nixosTest.nix (new file, 137 lines)
@@ -0,0 +1,137 @@
{
  inputs,
  hostPkgs,
  lib,
  ...
}:

let
  ## Some places need a dummy file that will in fact never be used. We create
  ## it here.
  dummyFile = hostPkgs.writeText "dummy" "";
in

{
  _class = "nixosTest";

  name = "deployment-cli";

  sourceFileset = lib.fileset.unions [
    ./constants.nix
    ./deployments.nix

    # REVIEW: I would like to be able to grab all of `/deployment` minus
    # `/deployment/check`, but I can't because there is a bunch of other files
    # in `/deployment`. Maybe we can think of a reorg making things more robust
    # here? (comment also in panel test)
    ../../default.nix
    ../../options.nix
    ../../configuration.sample.json

    ../../../services/fediversity
  ];

  nodes.deployer =
    { pkgs, ... }:
    {
      environment.systemPackages = [
        inputs.nixops4.packages.${pkgs.system}.default
      ];

      ## FIXME: The following dependencies are necessary but I do not
      ## understand why they are not covered by the fake node.
      system.extraDependencies = with pkgs; [
        peertube
        peertube.inputDerivation
        gixy
        gixy.inputDerivation
      ];

      system.extraDependenciesFromModule = {
        imports = [ ../../../services/fediversity ];
        fediversity = {
          domain = "fediversity.net"; # would write `dummy` but that would not type
          garage.enable = true;
          mastodon = {
            enable = true;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          peertube = {
            enable = true;
            secretsFile = dummyFile;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          pixelfed = {
            enable = true;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          temp.cores = 1;
          temp.initialUser = {
            username = "dummy";
            displayName = "dummy";
            email = "dummy";
            passwordFile = dummyFile;
          };
        };
      };
    };

  ## NOTE: The target machines may need more RAM than the default to handle
  ## being deployed to, otherwise we get something like:
  ##
  ## pixelfed # [ 616.785499 ] sshd-session[1167]: Conection closed by 2001:db8:1::2 port 45004
  ## deployer # error: writing to file: No space left on device
  ## pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
  ## pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
  ## deployer # Error: Could not create resource
  ##
  ## These values have been trimmed down to the gigabyte.
  nodes.mastodon.virtualisation.memorySize = 4 * 1024;
  nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
  nodes.peertube.virtualisation.memorySize = 5 * 1024;

  ## FIXME: The tests of presence of the services are very simple: we only
  ## check that there is a systemd service of the expected name on the
  ## machine. This proves at least that NixOps4 did something, and we cannot
  ## really do more for now because the services aren't actually working
  ## properly, in particular because of DNS issues. We should fix the services
  ## and check that they are working properly.

  extraTestScript = ''
    with subtest("Check the status of the services - there should be none"):
        garage.fail("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with no services enabled"):
        deployer.succeed("nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive 1>&2")

    with subtest("Check the status of the services - there should still be none"):
        garage.fail("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with Mastodon and Pixelfed enabled"):
        deployer.succeed("nixops4 apply check-deployment-cli-mastodon-pixelfed --show-trace --no-interactive 1>&2")

    with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
        garage.succeed("systemctl status garage.service")
        mastodon.succeed("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.succeed("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with only Peertube enabled"):
        deployer.succeed("nixops4 apply check-deployment-cli-peertube --show-trace --no-interactive 1>&2")

    with subtest("Check the status of the services - expecting Garage and Peertube"):
        garage.succeed("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.succeed("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")
  '';
}
deployment/check/common/deployerNode.nix (new file, 106 lines)
@@ -0,0 +1,106 @@
{
  inputs,
  lib,
  pkgs,
  config,
  sources,
  ...
}:

let
  inherit (lib)
    mkOption
    mkForce
    concatLists
    types
    ;
in
{
  _class = "nixos";

  imports = [ ./sharedOptions.nix ];

  options.system.extraDependenciesFromModule = mkOption {
    type = types.deferredModule;
    description = ''
      Grab the derivations needed to build the given module and dump them in
      system.extraDependencies. You want to put in this module a superset of
      all the things that you will need on your target machines.

      NOTE: This will work as long as the union of all these configurations
      does not have conflicts that would prevent evaluation.
    '';
    default = { };
  };

  config = {
    virtualisation = {
      ## NOTE: The deployer machine needs more RAM and disk than the default.
      ## These values have been trimmed down to the gigabyte.
      ## Memory use is expected to be dominated by the NixOS evaluation,
      ## which happens on the deployer.
      memorySize = 4 * 1024;
      diskSize = 4 * 1024;
      cores = 2;
    };

    nix.settings = {
      substituters = mkForce [ ];
      hashed-mirrors = null;
      connect-timeout = 1;
      extra-experimental-features = "flakes";
    };

    system.extraDependencies =
      [
        inputs.nixops4
        inputs.nixops4-nixos
        inputs.nixpkgs

        sources.flake-parts
        sources.flake-inputs
        sources.git-hooks

        pkgs.stdenv
        pkgs.stdenvNoCC
      ]
      ++ (
        let
          ## We build a whole NixOS system that contains the module
          ## `system.extraDependenciesFromModule`, only to grab its
          ## configuration and the store paths needed to build it and
          ## dump them in `system.extraDependencies`.
          machine =
            (pkgs.nixos [
              ./targetNode.nix
              config.system.extraDependenciesFromModule
              {
                nixpkgs.hostPlatform = "x86_64-linux";
                _module.args = { inherit inputs sources; };
                enableAcme = config.enableAcme;
                acmeNodeIP = config.acmeNodeIP;
              }
            ]).config;
        in
        [
          machine.system.build.toplevel.inputDerivation
          machine.system.build.etc.inputDerivation
          machine.system.build.etcBasedir.inputDerivation
          machine.system.build.etcMetadataImage.inputDerivation
          machine.system.build.extraUtils.inputDerivation
          machine.system.path.inputDerivation
          machine.system.build.setEnvironment.inputDerivation
          machine.system.build.vm.inputDerivation
          machine.system.build.bootStage1.inputDerivation
          machine.system.build.bootStage2.inputDerivation
        ]
        ++ concatLists (
          lib.mapAttrsToList (
            _k: v: if v ? source.inputDerivation then [ v.source.inputDerivation ] else [ ]
          ) machine.environment.etc
        )
      );
  };
}
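A usage sketch of `extraDependenciesFromModule` (hypothetical minimal example; the real ones live in the checks' `nixosTest.nix` files, e.g. the CLI check above): a test lists a module whose build closure should be pre-seeded on the deployer, and everything needed to build it lands in `system.extraDependencies`:

``` nix
# Hypothetical: a deployer node pre-seeding the Fediversity services' closure.
nodes.deployer.system.extraDependenciesFromModule = {
  imports = [ ../../../services/fediversity ];
  fediversity.garage.enable = true;
};
```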
deployment/check/common/nixosTest.nix (new file, 201 lines)
@@ -0,0 +1,201 @@
{
  inputs,
  lib,
  config,
  hostPkgs,
  sources,
  ...
}:

let
  inherit (builtins)
    concatStringsSep
    toJSON
    ;
  inherit (lib)
    types
    fileset
    mkOption
    genAttrs
    attrNames
    optionalString
    ;
  inherit (hostPkgs)
    runCommandNoCC
    writeText
    system
    ;

  forConcat = xs: f: concatStringsSep "\n" (map f xs);

  ## We will need to override some inputs by the empty flake, so we make one.
  emptyFlake = runCommandNoCC "empty-flake" { } ''
    mkdir $out
    echo "{ outputs = { self }: {}; }" > $out/flake.nix
  '';
in
{
  _class = "nixosTest";

  imports = [
    ./sharedOptions.nix
  ];

  options = {
    ## FIXME: I wish I could just use `testScript` but with something like
    ## `mkOrder` to put this module's string before something else.
    extraTestScript = mkOption { };

    sourceFileset = mkOption {
      ## FIXME: grab `lib.types.fileset` from NixOS, once upstreaming PR
      ## https://github.com/NixOS/nixpkgs/pull/428293 lands.
      type = types.mkOptionType {
        name = "fileset";
        description = "fileset";
        descriptionClass = "noun";
        check = (x: (builtins.tryEval (fileset.unions [ x ])).success);
        merge = (_: defs: fileset.unions (map (x: x.value) defs));
      };
      description = ''
        A fileset that will be copied to the deployer node in the current
        working directory. This should contain all the files that are
        necessary to run that particular test, such as the NixOS
        modules necessary to evaluate a deployment.
      '';
    };
  };

  config = {
    sourceFileset = fileset.unions [
      # NOTE: not the flake itself; it will be overridden.
      ../../../mkFlake.nix
      ../../../flake.lock
      ../../../npins

      ./sharedOptions.nix
      ./targetNode.nix
      ./targetResource.nix

      (config.pathToCwd + "/flake-under-test.nix")
    ];

    acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;

    nodes =
      {
        deployer = {
          imports = [ ./deployerNode.nix ];
          _module.args = { inherit inputs sources; };
          enableAcme = config.enableAcme;
          acmeNodeIP = config.nodes.acme.networking.primaryIPAddress;
        };
      }

      //

      (
        if config.enableAcme then
          {
            acme = {
              ## FIXME: This makes `nodes.acme` into a local resolver. Maybe this will
              ## break things once we play with DNS?
              imports = [ "${inputs.nixpkgs}/nixos/tests/common/acme/server" ];
              ## We aren't testing ACME - we just want certificates.
              systemd.services.pebble.environment.PEBBLE_VA_ALWAYS_VALID = "1";
            };
          }
        else
          { }
      )

      //

      genAttrs config.targetMachines (_: {
        imports = [ ./targetNode.nix ];
        _module.args = { inherit inputs sources; };
        enableAcme = config.enableAcme;
        acmeNodeIP = if config.enableAcme then config.nodes.acme.networking.primaryIPAddress else null;
      });

    testScript = ''
      ${forConcat (attrNames config.nodes) (n: ''
        ${n}.start()
      '')}

      ${forConcat (attrNames config.nodes) (n: ''
        ${n}.wait_for_unit("multi-user.target")
      '')}

      ## A subset of the repository that is necessary for this test. It will be
      ## copied inside the test. The smaller this set, the faster our CI, because we
      ## won't need to re-run when things change outside of it.
      with subtest("Unpacking"):
          deployer.succeed("cp -r --no-preserve=mode ${
            fileset.toSource {
              root = ../../..;
              fileset = config.sourceFileset;
            }
          }/* .")

      with subtest("Configure the network"):
          ${forConcat config.targetMachines (
            tm:
            let
              targetNetworkJSON = writeText "target-network.json" (
                toJSON config.nodes.${tm}.system.build.networkConfig
              );
            in
            ''
              deployer.copy_from_host("${targetNetworkJSON}", "${config.pathFromRoot}/${tm}-network.json")
            ''
          )}

      with subtest("Configure the deployer key"):
          deployer.succeed("""mkdir -p ~/.ssh && ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa""")
          deployer_key = deployer.succeed("cat ~/.ssh/id_rsa.pub").strip()
          ${forConcat config.targetMachines (tm: ''
            ${tm}.succeed(f"mkdir -p /root/.ssh && echo '{deployer_key}' >> /root/.ssh/authorized_keys")
          '')}

      with subtest("Configure the target host key"):
          ${forConcat config.targetMachines (tm: ''
            host_key = ${tm}.succeed("ssh-keyscan ${tm} | grep -v '^#' | cut -f 2- -d ' ' | head -n 1")
            deployer.succeed(f"echo '{host_key}' > ${config.pathFromRoot}/${tm}_host_key.pub")
          '')}

      ## NOTE: This is super slow. It could probably be optimised in Nix, for
      ## instance by allowing to grab things directly from the host's store.
      ##
      ## NOTE: We use the repository as-is (cf `src` above), overriding only
      ## `flake.nix` by our `flake-under-test.nix`. We also override the flake
      ## lock file to use locally available inputs, as we cannot download them.
      ##
      with subtest("Override the flake and its lock"):
          deployer.succeed("cp ${config.pathFromRoot}/flake-under-test.nix flake.nix")
          deployer.succeed("""
            nix flake lock --extra-experimental-features 'flakes nix-command' \
              --offline -v \
              --override-input nixops4 ${inputs.nixops4.packages.${system}.flake-in-a-bottle} \
              \
              --override-input nixops4-nixos ${inputs.nixops4-nixos} \
              --override-input nixops4-nixos/flake-parts ${inputs.nixops4-nixos.inputs.flake-parts} \
              --override-input nixops4-nixos/flake-parts/nixpkgs-lib ${inputs.nixops4-nixos.inputs.flake-parts.inputs.nixpkgs-lib} \
              --override-input nixops4-nixos/nixops4-nixos ${emptyFlake} \
              --override-input nixops4-nixos/nixpkgs ${inputs.nixops4-nixos.inputs.nixpkgs} \
              --override-input nixops4-nixos/nixops4 ${
                inputs.nixops4-nixos.inputs.nixops4.packages.${system}.flake-in-a-bottle
              } \
              --override-input nixops4-nixos/git-hooks-nix ${emptyFlake} \
              ;
          """)

      ${optionalString config.enableAcme ''
        with subtest("Set up handmade DNS"):
            deployer.succeed("echo '${config.nodes.acme.networking.primaryIPAddress}' > ${config.pathFromRoot}/acme_server_ip")
      ''}

      ${config.extraTestScript}
    '';
  };
}
deployment/check/common/sharedOptions.nix (new file, 68 lines)
@@ -0,0 +1,68 @@
/**
  This file contains options shared by various components of the integration test, i.e. deployment resources, test nodes, target configurations, etc.
  All these components are declared as modules, but are part of different evaluations, which is why the options in this file can't be shared "directly".
  Instead, each component imports this module and the same values are set for each of them from a common call site.
  Not all components will use all the options, which allows not setting all the values.
*/

{ config, lib, ... }:

let
  inherit (lib) mkOption types;
in
# `config` not set and imported from multiple places: no fixed module class
{
  options = {
    targetMachines = mkOption {
      type = with types; listOf str;
      description = ''
        Names of the nodes in the NixOS test that are “target machines”. This is
        used by the infrastructure to extract their network configuration, among
        other things, and re-import it in the deployment.
      '';
    };

    pathToRoot = mkOption {
      type = types.path;
      description = ''
        Path from the location of the working directory to the root of the
        repository.
      '';
    };

    pathFromRoot = mkOption {
      type = types.path;
      description = ''
        Path from the root of the repository to the working directory.
      '';
      apply = x: lib.path.removePrefix config.pathToRoot x;
    };

    pathToCwd = mkOption {
      type = types.path;
      description = ''
        Path to the current working directory. This is a shortcut for
        pathToRoot/pathFromRoot.
      '';
      default = config.pathToRoot + "/${config.pathFromRoot}";
    };

    enableAcme = mkOption {
      type = types.bool;
      description = ''
        Whether to enable ACME in the NixOS test. This will add an ACME server
        to the node and connect all the target machines to it.
      '';
      default = false;
    };

    acmeNodeIP = mkOption {
      type = types.str;
      description = ''
        The IP of the ACME node in the NixOS test. This option will be set
        during the test to the correct value.
      '';
    };
  };
}
deployment/check/common/targetNode.nix (new file, 67 lines)
@@ -0,0 +1,67 @@
{
  inputs,
  config,
  lib,
  modulesPath,
  ...
}:

let
  testCerts = import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix";
  inherit (lib) mkIf mkMerge;
in
{
  _class = "nixos";

  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
    (modulesPath + "/../lib/testing/nixos-test-base.nix")
    ./sharedOptions.nix
  ];

  config = mkMerge [
    {
      ## Test framework disables switching by default. That might be OK by itself,
      ## but we also use this config for getting the dependencies in
      ## `deployer.system.extraDependencies`.
      system.switch.enable = true;

      nix = {
        ## Not used; save a large copy operation
        channel.enable = false;
        registry = lib.mkForce { };
      };

      services.openssh = {
        enable = true;
        settings.PermitRootLogin = "yes";
      };

      networking.firewall.allowedTCPPorts = [ 22 ];

      ## Test VMs don't have a bootloader by default.
      boot.loader.grub.enable = false;
    }

    (mkIf config.enableAcme {
      security.acme = {
        acceptTerms = true;
        defaults.email = "test@test.com";
        defaults.server = "https://acme.test/dir";
      };

      security.pki.certificateFiles = [
        ## NOTE: This certificate is the one used by the Pebble HTTPS server.
        ## This is NOT the root CA of the Pebble server. We do add it here so
        ## that Pebble clients can talk to its API, but this will not allow
        ## those machines to verify generated certificates.
        testCerts.ca.cert
      ];

      ## FIXME: it is a bit sad that all this logistics is necessary. look into
      ## better DNS stuff
      networking.extraHosts = "${config.acmeNodeIP} acme.test";
    })
  ];
}
deployment/check/common/targetResource.nix (new file, 51 lines)
@@ -0,0 +1,51 @@
{
  inputs,
  lib,
  config,
  sources,
  ...
}:

let
  inherit (builtins) readFile;
  inherit (lib) trim mkOption types;
in

{
  _class = "nixops4Resource";

  imports = [ ./sharedOptions.nix ];

  options = {
    nodeName = mkOption {
      type = types.str;
      description = ''
        The name of the node in the NixOS test;
        needed for recovering the node configuration to prepare its deployment.
      '';
    };
  };

  config = {
    ssh = {
      host = config.nodeName;
      hostPublicKey = readFile (config.pathToCwd + "/${config.nodeName}_host_key.pub");
    };

    nixpkgs = inputs.nixpkgs;

    nixos.module = {
      imports = [
        ./targetNode.nix
        (lib.modules.importJSON (config.pathToCwd + "/${config.nodeName}-network.json"))
      ];

      _module.args = { inherit inputs sources; };
      enableAcme = config.enableAcme;
      acmeNodeIP = trim (readFile (config.pathToCwd + "/acme_server_ip"));

      nixpkgs.hostPlatform = "x86_64-linux";
    };
  };
}
deployment/check/panel/constants.nix (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  targetMachines = [
    "garage"
    "mastodon"
    "peertube"
    "pixelfed"
  ];
  pathToRoot = ../../..;
  pathFromRoot = ./.;
  enableAcme = true;
}
deployment/check/panel/default.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
{
  runNixOSTest,
  inputs,
  sources,
}:

runNixOSTest {
  imports = [
    ../common/nixosTest.nix
    ./nixosTest.nix
  ];
  _module.args = { inherit inputs sources; };
  inherit (import ./constants.nix)
    targetMachines
    pathToRoot
    pathFromRoot
    enableAcme
    ;
}
deployment/check/panel/deployment.nix (new file, 58 lines)
@@ -0,0 +1,58 @@
{
  inputs,
  sources,
  lib,
}:

let
  inherit (builtins) fromJSON listToAttrs;
  inherit (import ./constants.nix)
    targetMachines
    pathToRoot
    pathFromRoot
    enableAcme
    ;

  makeTargetResource = nodeName: {
    imports = [ ../common/targetResource.nix ];
    _module.args = { inherit inputs sources; };
    inherit
      nodeName
      pathToRoot
      pathFromRoot
      enableAcme
      ;
  };

  ## The deployment function - what we are here to test!
  ##
  ## TODO: Modularise `deployment/default.nix` to get rid of the nested
  ## function calls.
  makeTestDeployment =
    args:
    (import ../..)
      {
        inherit lib;
        inherit (inputs) nixops4 nixops4-nixos;
        fediversity = import ../../../services/fediversity;
      }
      (listToAttrs (
        map (nodeName: {
          name = "${nodeName}ConfigurationResource";
          value = makeTargetResource nodeName;
        }) targetMachines
      ))
      args;

in
makeTestDeployment (
  fromJSON (
    let
      env = builtins.getEnv "DEPLOYMENT";
    in
    if env == "" then
      throw "The DEPLOYMENT environment variable needs to be set. You do not want to use this deployment unless in the `deployment-panel` NixOS test."
    else
      env
  )
)
deployment/check/panel/flake-under-test.nix (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  inputs = {
    nixops4.follows = "nixops4-nixos/nixops4";
    nixops4-nixos.url = "github:nixops4/nixops4-nixos";
  };

  outputs =
    inputs:
    import ./mkFlake.nix inputs (
      {
        inputs,
        sources,
        lib,
        ...
      }:
      {
        imports = [
          inputs.nixops4.modules.flake.default
        ];

        nixops4Deployments.check-deployment-panel = import ./deployment/check/panel/deployment.nix {
          inherit inputs sources lib;
        };
      }
    );
}
377
deployment/check/panel/nixosTest.nix
Normal file

@ -0,0 +1,377 @@
{
  inputs,
  lib,
  hostPkgs,
  config,
  ...
}:

let
  inherit (lib)
    getExe
    ;

  ## Some places need a dummy file that will in fact never be used. We create
  ## it here.
  dummyFile = hostPkgs.writeText "dummy" "dummy";
  panelPort = 8000;

  panelUser = "test";
  panelEmail = "test@test.com";
  panelPassword = "ouiprdaaa43"; # the panel's manager complains if this is too close to the username or email

  fediUser = "test";
  fediEmail = "test@test.com";
  fediPassword = "testtest";
  fediName = "Testy McTestface";

  toPythonBool = b: if b then "True" else "False";

  interactWithPanel =
    {
      baseUri,
      enableMastodon,
      enablePeertube,
      enablePixelfed,
    }:
    hostPkgs.writers.writePython3Bin "interact-with-panel"
      {
        libraries = with hostPkgs.python3Packages; [ selenium ];
        flakeIgnore = [
          "E302" # expected 2 blank lines, found 0
          "E303" # too many blank lines
          "E305" # expected 2 blank lines after end of function or class
          "E501" # line too long (> 79 characters)
          "E731" # do not assign lambda expression, use a def
        ];
      }
      ''
        from selenium import webdriver
        from selenium.webdriver.common.by import By
        from selenium.webdriver.firefox.options import Options
        from selenium.webdriver.support.ui import WebDriverWait

        print("Create and configure driver...")
        options = Options()
        options.add_argument("--headless")
        options.binary_location = "${getExe hostPkgs.firefox-unwrapped}"
        service = webdriver.FirefoxService(executable_path="${getExe hostPkgs.geckodriver}")
        driver = webdriver.Firefox(options=options, service=service)
        driver.set_window_size(1280, 960)
        driver.implicitly_wait(360)
        driver.command_executor.set_timeout(3600)

        print("Open login page...")
        driver.get("${baseUri}/login/")
        print("Enter username...")
        driver.find_element(By.XPATH, "//input[@name = 'username']").send_keys("${panelUser}")
        print("Enter password...")
        driver.find_element(By.XPATH, "//input[@name = 'password']").send_keys("${panelPassword}")
        print("Click “Login” button...")
        driver.find_element(By.XPATH, "//button[normalize-space() = 'Login']").click()

        print("Open configuration page...")
        driver.get("${baseUri}/configuration/")

        # Helpers that actually set input values, rather than appending to or
        # toggling them.
        def input_set(elt, keys):
            elt.clear()
            elt.send_keys(keys)
        def checkbox_set(elt, new_value):
            if new_value != elt.is_selected():
                elt.click()

        print("Enable Fediversity...")
        checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'enable']"), True)

        print("Fill in initialUser info...")
        input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.username']"), "${fediUser}")
        input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.password']"), "${fediPassword}")
        input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.email']"), "${fediEmail}")
        input_set(driver.find_element(By.XPATH, "//input[@name = 'initialUser.displayName']"), "${fediName}")

        print("Enable services...")
        checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
        checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
        checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})

        print("Start deployment...")
        driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()

        print("Wait for deployment status to show up...")
        get_deployment_result = lambda d: d.find_element(By.XPATH, "//div[@id = 'deployment-result']//p")
        WebDriverWait(driver, timeout=3660, poll_frequency=10).until(get_deployment_result)
        deployment_result = get_deployment_result(driver).get_attribute('innerHTML')

        print("Quit...")
        driver.quit()

        match deployment_result:
            case 'Deployment Succeeded':
                print("Deployment has succeeded; exiting normally")
                exit(0)
            case 'Deployment Failed':
                print("Deployment has failed; exiting with return code `1`")
                exit(1)
            case _:
                print(f"Unexpected deployment result: {deployment_result}; exiting with return code `2`")
                exit(2)
      '';

in

{
  _class = "nixosTest";

  name = "deployment-panel";

  sourceFileset = lib.fileset.unions [
    ./constants.nix
    ./deployment.nix

    # REVIEW: I would like to be able to grab all of `/deployment` minus
    # `/deployment/check`, but I can't because there is a bunch of other files
    # in `/deployment`. Maybe we can think of a reorg making things more robust
    # here? (comment also in CLI test)
    ../../default.nix
    ../../options.nix

    ../../../services/fediversity
  ];

  ## The panel's module sets `nixpkgs.overlays`, which clashes with
  ## `pkgsReadOnly`. We disable it here.
  node.pkgsReadOnly = false;

  nodes.deployer =
    { pkgs, ... }:
    {
      imports = [
        (import ../../../panel { }).module
      ];

      ## FIXME: This should be in the common stuff.
      security.acme = {
        acceptTerms = true;
        defaults.email = "test@test.com";
        defaults.server = "https://acme.test/dir";
      };
      security.pki.certificateFiles = [
        (import "${inputs.nixpkgs}/nixos/tests/common/acme/server/snakeoil-certs.nix").ca.cert
      ];
      networking.extraHosts = "${config.acmeNodeIP} acme.test";

      services.panel = {
        enable = true;
        production = true;
        domain = "deployer";
        secrets = {
          SECRET_KEY = dummyFile;
        };
        port = panelPort;

        deployment = {
          flake = "/run/fedipanel/flake";
          name = "check-deployment-panel";
        };
      };

      environment.systemPackages = [ pkgs.expect ];

      ## FIXME: The following dependencies are necessary but I do not
      ## understand why they are not covered by the fake node.
      system.extraDependencies = with pkgs; [
        peertube
        peertube.inputDerivation
        gixy # a configuration checker for nginx
        gixy.inputDerivation
      ];

      system.extraDependenciesFromModule = {
        imports = [ ../../../services/fediversity ];
        fediversity = {
          domain = "fediversity.net"; # we would write `dummy`, but that would not typecheck
          garage.enable = true;
          mastodon = {
            enable = true;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          peertube = {
            enable = true;
            secretsFile = dummyFile;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          pixelfed = {
            enable = true;
            s3AccessKeyFile = dummyFile;
            s3SecretKeyFile = dummyFile;
          };
          temp.cores = 1;
          temp.initialUser = {
            username = "dummy";
            displayName = "dummy";
            email = "dummy";
            passwordFile = dummyFile;
          };
        };
      };
    };

  nodes.client =
    { pkgs, ... }:
    {
      environment.systemPackages = with pkgs; [
        httpie
        dnsutils # for `dig`
        openssl
        cacert
        wget
        python3
        python3Packages.selenium
        firefox-unwrapped
        geckodriver
      ];

      security.pki.certificateFiles = [
        config.nodes.acme.test-support.acme.caCert
      ];
      networking.extraHosts = "${config.acmeNodeIP} acme.test";
    };

  ## NOTE: The target machines may need more RAM than the default to handle
  ## being deployed to, otherwise we get something like:
  ##
  ##   pixelfed # [ 616.785499 ] sshd-session[1167]: Connection closed by 2001:db8:1::2 port 45004
  ##   deployer # error: writing to file: No space left on device
  ##   pixelfed # [ 616.788538 ] sshd-session[1151]: pam_unix(sshd:session): session closed for user port
  ##   pixelfed # [ 616.793929 ] systemd-logind[719]: Session 4 logged out. Waiting for processes to exit.
  ##   deployer # Error: Could not create resource
  ##
  ## These values have been trimmed down to the gigabyte.
  nodes.mastodon.virtualisation.memorySize = 4 * 1024;
  nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
  nodes.peertube.virtualisation.memorySize = 5 * 1024;

  ## FIXME: The tests for the presence of the services are very simple: we only
  ## check that there is a systemd service of the expected name on the
  ## machine. This proves at least that NixOps4 did something, and we cannot
  ## really do more for now because the services aren't actually working
  ## properly, in particular because of DNS issues. We should fix the services
  ## and check that they are working properly.

  extraTestScript = ''
    ## TODO: We want a nicer way to control where the FediPanel consumes its
    ## flake, which can default to the store but could also be somewhere else if
    ## someone wanted to change the code of the flake.
    ##
    with subtest("Give the panel access to the flake"):
        deployer.succeed("mkdir /run/fedipanel /run/fedipanel/flake >&2")
        deployer.succeed("cp -R . /run/fedipanel/flake >&2")
        deployer.succeed("chown -R panel:panel /run/fedipanel >&2")

    ## TODO: I want a programmatic way to provide an SSH key to the panel (and
    ## therefore NixOps4). This could happen in the Python code, but maybe it
    ## is fair that that one picks up on the user's key? It could also be in
    ## the Nix packaging.
    ##
    with subtest("Set up the panel's SSH keys"):
        deployer.succeed("mkdir /home/panel/.ssh >&2")
        deployer.succeed("cp -R /root/.ssh/* /home/panel/.ssh >&2")
        deployer.succeed("chown -R panel:panel /home/panel/.ssh >&2")
        deployer.succeed("chmod 600 /home/panel/.ssh/* >&2")

    ## TODO: This is a hack to accept the root CA used by Pebble on the client
    ## machine. Pebble randomizes everything, so the only way to get it is to
    ## call the /roots/0 endpoint at runtime, leaving not much margin for a nice
    ## Nixy way of adding the certificate. There is no way around it as this is
    ## by design in Pebble, showing in fact that Pebble was not the appropriate
    ## tool for our use and that nixpkgs does not in fact provide an easy way to
    ## generate _usable_ certificates in NixOS tests. I suggest we merge this,
    ## and track the task to set it up in a cleaner way. I would tackle this in
    ## a subsequent PR, and hopefully even contribute this BetterWay(tm) to
    ## nixpkgs. — Niols
    ##
    with subtest("Set up ACME root CA on client"):
        client.succeed("""
            cd /etc/ssl/certs
            curl -o pebble-root-ca.pem https://acme.test:15000/roots/0
            curl -o pebble-intermediate-ca.pem https://acme.test:15000/intermediates/0
            { cat ca-bundle.crt
              cat pebble-root-ca.pem
              cat pebble-intermediate-ca.pem
            } > new-ca-bundle.crt
            rm ca-bundle.crt ca-certificates.crt
            mv new-ca-bundle.crt ca-bundle.crt
            ln -s ca-bundle.crt ca-certificates.crt
        """)

    ## TODO: I would hope for a more declarative way to add users. This should
    ## be handled by the Nix packaging of the FediPanel. — Niols
    ##
    with subtest("Create panel user"):
        deployer.succeed("""
            expect -c '
                spawn manage createsuperuser --username ${panelUser} --email ${panelEmail}
                expect "Password: "; send "${panelPassword}\\n";
                expect "Password (again): "; send "${panelPassword}\\n"
                interact
            ' >&2
        """)

    with subtest("Check the status of the services - there should be none"):
        garage.fail("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with no services enabled"):
        client.succeed("${
          interactWithPanel {
            baseUri = "https://deployer";
            enableMastodon = false;
            enablePeertube = false;
            enablePixelfed = false;
          }
        }/bin/interact-with-panel >&2")

    with subtest("Check the status of the services - there should still be none"):
        garage.fail("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with Mastodon and Pixelfed enabled"):
        client.succeed("${
          interactWithPanel {
            baseUri = "https://deployer";
            enableMastodon = true;
            enablePeertube = false;
            enablePixelfed = true;
          }
        }/bin/interact-with-panel >&2")

    with subtest("Check the status of the services - expecting Garage, Mastodon and Pixelfed"):
        garage.succeed("systemctl status garage.service")
        mastodon.succeed("systemctl status mastodon-web.service")
        peertube.fail("systemctl status peertube.service")
        pixelfed.succeed("systemctl status phpfpm-pixelfed.service")

    with subtest("Run deployment with only Peertube enabled"):
        client.succeed("${
          interactWithPanel {
            baseUri = "https://deployer";
            enableMastodon = false;
            enablePeertube = true;
            enablePixelfed = false;
          }
        }/bin/interact-with-panel >&2")

    with subtest("Check the status of the services - expecting Garage and Peertube"):
        garage.succeed("systemctl status garage.service")
        mastodon.fail("systemctl status mastodon-web.service")
        peertube.succeed("systemctl status peertube.service")
        pixelfed.fail("systemctl status phpfpm-pixelfed.service")
  '';
}
@ -1,5 +1,5 @@
{
  "domain": "abundos.eu",
  "domain": "fediversity.net",
  "mastodon": { "enable": false },
  "peertube": { "enable": false },
  "pixelfed": { "enable": false },
70
deployment/data-model-test.nix
Normal file

@ -0,0 +1,70 @@
let
  inherit (import ../default.nix { }) pkgs inputs;
  inherit (pkgs) lib;
  inherit (lib) mkOption;
  eval =
    module:
    (lib.evalModules {
      specialArgs = {
        inherit inputs;
      };
      modules = [
        module
        ./data-model.nix
      ];
    }).config;
in
{
  _class = "nix-unit";

  test-eval = {
    expr =
      let
        fediversity = eval (
          { config, ... }:
          {
            config = {
              applications.hello =
                { ... }:
                {
                  description = ''Command-line tool that will print "Hello, world!" on the terminal'';
                  module =
                    { ... }:
                    {
                      options = {
                        enable = lib.mkEnableOption "Hello in the shell";
                      };
                    };
                  implementation =
                    cfg:
                    lib.optionalAttrs cfg.enable {
                      dummy.login-shell.packages.hello = pkgs.hello;
                    };
                };
            };
            options = {
              example-configuration = mkOption {
                type = config.configuration;
                readOnly = true;
                default = {
                  enable = true;
                  applications.hello.enable = true;
                };
              };
            };
          }
        );
      in
      {
        inherit (fediversity)
          example-configuration
          ;
      };
    expected = {
      example-configuration = {
        enable = true;
        applications.hello.enable = true;
      };
    };
  };
}
89
deployment/data-model.nix
Normal file

@ -0,0 +1,89 @@
{
  lib,
  config,
  ...
}:
let
  inherit (lib) mkOption types;
  inherit (lib.types)
    attrsOf
    attrTag
    deferredModuleWith
    submodule
    optionType
    functionTo
    ;

  functionType = import ./function.nix;
  application-resources = {
    options.resources = mkOption {
      # TODO: maybe transpose, and group the resources by type instead
      type = attrsOf (
        attrTag (lib.mapAttrs (_name: resource: mkOption { type = resource.request; }) config.resources)
      );
    };
  };
in
{
  _class = "nixops4Deployment";

  options = {
    applications = mkOption {
      description = "Collection of Fediversity applications";
      type = attrsOf (
        submodule (application: {
          _class = "fediversity-application";
          options = {
            description = mkOption {
              description = "Description to be shown in the application overview";
              type = types.str;
            };
            module = mkOption {
              description = "Operator-facing configuration options for the application";
              type = deferredModuleWith { staticModules = [ { _class = "fediversity-application-config"; } ]; };
            };
            implementation = mkOption {
              description = "Mapping of application configuration to deployment resources, a description of what an application needs to run";
              type = application.config.config-mapping.function-type;
            };
            resources = mkOption {
              description = "Compute resources required by an application";
              type = functionTo application.config.config-mapping.output-type;
              readOnly = true;
              default = input: (application.config.implementation input).output;
            };
            config-mapping = mkOption {
              description = "Function type for the mapping from application configuration to required resources";
              type = submodule functionType;
              readOnly = true;
              default = {
                input-type = application.config.module;
                output-type = application-resources;
              };
            };
          };
        })
      );
    };
    configuration = mkOption {
      description = "Configuration type declaring options to be set by operators";
      type = optionType;
      readOnly = true;
      default = submodule {
        options = {
          enable = lib.mkEnableOption {
            description = "your Fediversity configuration";
          };
          applications = lib.mapAttrs (
            _name: application:
            mkOption {
              description = application.description;
              type = submodule application.module;
              default = { };
            }
          ) config.applications;
        };
      };
    };
  };
}
@ -33,17 +33,40 @@
## information coming from the FediPanel.
##
## FIXME: lock step the interface with the definitions in the FediPanel
panelConfig:
panelConfigNullable:

let
  inherit (lib) mkIf;

  ## The converter from module options to JSON schema does not generate proper
  ## JSON schema types, forcing us to use nullable fields for default values.
  ## However, working with those fields in the deployment code is annoying (and
  ## unusual for Nix programmers), so we sanitize the input here and add back
  ## the default values by hand.
  nonNull = x: v: if x == null then v else x;
  panelConfig = {
    domain = nonNull panelConfigNullable.domain "fediversity.net";
    initialUser = nonNull panelConfigNullable.initialUser {
      displayName = "Testy McTestface";
      username = "test";
      password = "testtest";
      email = "test@test.com";
    };
    mastodon = nonNull panelConfigNullable.mastodon { enable = false; };
    peertube = nonNull panelConfigNullable.peertube { enable = false; };
    pixelfed = nonNull panelConfigNullable.pixelfed { enable = false; };
  };
in

## Regular arguments of a NixOps4 deployment module.
{ providers, ... }:
{ config, providers, ... }:

let
  cfg = config.deployment;
in
{
  _class = "nixops4Deployment";

  options = {
    deployment = lib.mkOption {
      description = ''
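A minimal sketch of the `nonNull` fallback in isolation, with invented inputs:

```nix
let
  nonNull = x: v: if x == null then v else x;
in
{
  # A null field from the panel falls back to the hand-written default …
  mastodon = nonNull null { enable = false; }; # => { enable = false; }
  # … while a value the panel did set passes through untouched.
  peertube = nonNull { enable = true; } { enable = false; }; # => { enable = true; }
}
```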
|||
# XXX(@fricklerhandwerk):
|
||||
# misusing this will produce obscure errors that will be truncated by NixOps4
|
||||
type = lib.types.submodule ./options.nix;
|
||||
default = panelConfig;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
@ -116,9 +140,9 @@ in
|
|||
{
|
||||
garage-configuration = makeConfigurationResource garageConfigurationResource (
|
||||
{ pkgs, ... }:
|
||||
mkIf (panelConfig.mastodon.enable || panelConfig.peertube.enable || panelConfig.pixelfed.enable) {
|
||||
mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable) {
|
||||
fediversity = {
|
||||
inherit (panelConfig) domain;
|
||||
inherit (cfg) domain;
|
||||
garage.enable = true;
|
||||
pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
|
||||
mastodon = mastodonS3KeyConfig { inherit pkgs; };
|
||||
|
|
@ -129,14 +153,14 @@ in
|
|||
|
||||
mastodon-configuration = makeConfigurationResource mastodonConfigurationResource (
|
||||
{ pkgs, ... }:
|
||||
mkIf panelConfig.mastodon.enable {
|
||||
mkIf cfg.mastodon.enable {
|
||||
fediversity = {
|
||||
inherit (panelConfig) domain;
|
||||
inherit (cfg) domain;
|
||||
temp.initialUser = {
|
||||
inherit (panelConfig.initialUser) username email displayName;
|
||||
inherit (cfg.initialUser) username email displayName;
|
||||
# FIXME: disgusting, but nvm, this is going to be replaced by
|
||||
# proper central authentication at some point
|
||||
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
|
||||
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
|
||||
};
|
||||
|
||||
mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
|
||||
|
|
@ -150,14 +174,14 @@ in
|
|||
|
||||
peertube-configuration = makeConfigurationResource peertubeConfigurationResource (
|
||||
{ pkgs, ... }:
|
||||
mkIf panelConfig.peertube.enable {
|
||||
mkIf cfg.peertube.enable {
|
||||
fediversity = {
|
||||
inherit (panelConfig) domain;
|
||||
inherit (cfg) domain;
|
||||
temp.initialUser = {
|
||||
inherit (panelConfig.initialUser) username email displayName;
|
||||
inherit (cfg.initialUser) username email displayName;
|
||||
# FIXME: disgusting, but nvm, this is going to be replaced by
|
||||
# proper central authentication at some point
|
||||
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
|
||||
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
|
||||
};
|
||||
|
||||
peertube = peertubeS3KeyConfig { inherit pkgs; } // {
|
||||
|
|
@ -173,14 +197,14 @@ in
|
|||
|
||||
pixelfed-configuration = makeConfigurationResource pixelfedConfigurationResource (
|
||||
{ pkgs, ... }:
|
||||
mkIf panelConfig.pixelfed.enable {
|
||||
mkIf cfg.pixelfed.enable {
|
||||
fediversity = {
|
||||
inherit (panelConfig) domain;
|
||||
inherit (cfg) domain;
|
||||
temp.initialUser = {
|
||||
inherit (panelConfig.initialUser) username email displayName;
|
||||
inherit (cfg.initialUser) username email displayName;
|
||||
# FIXME: disgusting, but nvm, this is going to be replaced by
|
||||
# proper central authentication at some point
|
||||
passwordFile = pkgs.writeText "password" panelConfig.initialUser.password;
|
||||
passwordFile = pkgs.writeText "password" cfg.initialUser.password;
|
||||
};
|
||||
|
||||
pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,26 @@
{ inputs, sources, ... }:

{
  imports = [ ./check/basic/flake-part.nix ];
  _class = "flake";

  perSystem =
    { pkgs, ... }:
    {
      checks = {
        deployment-basic = import ./check/basic {
          inherit (pkgs.testers) runNixOSTest;
          inherit inputs sources;
        };

        deployment-cli = import ./check/cli {
          inherit (pkgs.testers) runNixOSTest;
          inherit inputs sources;
        };

        deployment-panel = import ./check/panel {
          inherit (pkgs.testers) runNixOSTest;
          inherit inputs sources;
        };
      };
    };
}
37
deployment/function.nix
Normal file

@ -0,0 +1,37 @@
/**
  Modular function type
*/
{ config, lib, ... }:
let
  inherit (lib) mkOption types;
  inherit (types)
    deferredModule
    submodule
    functionTo
    optionType
    ;
in
{
  options = {
    input-type = mkOption {
      type = deferredModule;
    };
    output-type = mkOption {
      type = deferredModule;
    };
    function-type = mkOption {
      type = optionType;
      readOnly = true;
      default = functionTo (submodule {
        options = {
          input = mkOption {
            type = submodule config.input-type;
          };
          output = mkOption {
            type = submodule config.output-type;
          };
        };
      });
    };
  };
}
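A hedged sketch of how this function type could be instantiated on its own — the `input-type`/`output-type` modules below are invented for illustration, and `<nixpkgs>` is assumed to be on `NIX_PATH`:

```nix
let
  lib = import <nixpkgs/lib>;
  fn =
    (lib.evalModules {
      modules = [
        ./deployment/function.nix
        {
          # Hypothetical input and output shapes.
          input-type.options.enable = lib.mkEnableOption "the example application";
          output-type.options.greeting = lib.mkOption { type = lib.types.str; };
        }
      ];
    }).config;
in
# `fn.function-type` is now an option type for functions that take the
# input module's configuration and declare both `input` and `output`.
fn.function-type
```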
@ -17,6 +17,8 @@ let
  inherit (lib) types mkOption;
in
{
  _class = "nixops4Deployment";

  options = {
    enable = lib.mkEnableOption "Fediversity configuration";
    domain = mkOption {
140
flake.lock
generated

@ -59,22 +59,6 @@
      }
    },
    "flake-compat_2": {
      "flake": false,
      "locked": {
        "lastModified": 1696426674,
        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "flake-compat_3": {
      "flake": false,
      "locked": {
        "lastModified": 1733328505,

@ -90,7 +74,7 @@
        "type": "github"
      }
    },
    "flake-compat_4": {
    "flake-compat_3": {
      "flake": false,
      "locked": {
        "lastModified": 1696426674,

@ -143,24 +127,6 @@
      }
    },
    "flake-parts_3": {
      "inputs": {
        "nixpkgs-lib": "nixpkgs-lib_3"
      },
      "locked": {
        "lastModified": 1738453229,
        "narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
        "owner": "hercules-ci",
        "repo": "flake-parts",
        "rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
        "type": "github"
      },
      "original": {
        "owner": "hercules-ci",
        "repo": "flake-parts",
        "type": "github"
      }
    },
    "flake-parts_4": {
      "inputs": {
        "nixpkgs-lib": [
          "nixops4-nixos",

@ -201,32 +167,12 @@
        "type": "github"
      }
    },
    "git-hooks": {
    "git-hooks-nix": {
      "inputs": {
        "flake-compat": "flake-compat",
        "gitignore": "gitignore",
        "nixpkgs": "nixpkgs"
      },
      "locked": {
        "lastModified": 1742649964,
        "narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
        "owner": "cachix",
        "repo": "git-hooks.nix",
        "rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
        "type": "github"
      },
      "original": {
        "owner": "cachix",
        "repo": "git-hooks.nix",
        "type": "github"
      }
    },
    "git-hooks-nix": {
      "inputs": {
        "flake-compat": "flake-compat_2",
        "gitignore": "gitignore_2",
        "nixpkgs": "nixpkgs_2"
      },
      "locked": {
        "lastModified": 1737465171,
        "narHash": "sha256-R10v2hoJRLq8jcL4syVFag7nIGE7m13qO48wRIukWNg=",

@ -281,27 +227,6 @@
      }
    },
    "gitignore": {
      "inputs": {
        "nixpkgs": [
          "git-hooks",
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1709087332,
        "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
        "type": "github"
      },
      "original": {
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "type": "github"
      }
    },
    "gitignore_2": {
      "inputs": {
        "nixpkgs": [
          "nixops4-nixos",

@ -341,8 +266,8 @@
    },
    "nix": {
      "inputs": {
        "flake-compat": "flake-compat_3",
        "flake-parts": "flake-parts_4",
        "flake-compat": "flake-compat_2",
        "flake-parts": "flake-parts_3",
        "git-hooks-nix": "git-hooks-nix_2",
        "nixfmt": "nixfmt",
        "nixpkgs": [

@ -416,10 +341,10 @@
    },
    "nixops4": {
      "inputs": {
        "flake-parts": "flake-parts_3",
        "flake-parts": "flake-parts_2",
        "nix": "nix",
        "nix-cargo-integration": "nix-cargo-integration",
        "nixpkgs": "nixpkgs_3",
        "nixpkgs": "nixpkgs_2",
        "nixpkgs-old": "nixpkgs-old"
      },
      "locked": {

@ -438,7 +363,7 @@
    },
    "nixops4-nixos": {
      "inputs": {
        "flake-parts": "flake-parts_2",
        "flake-parts": "flake-parts",
        "git-hooks-nix": "git-hooks-nix",
        "nixops4": "nixops4",
        "nixops4-nixos": [

@ -520,18 +445,6 @@
        "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
      }
    },
    "nixpkgs-lib_3": {
      "locked": {
        "lastModified": 1738452942,
        "narHash": "sha256-vJzFZGaCpnmo7I6i416HaBLpC+hvcURh/BQwROcGIp8=",
        "type": "tarball",
        "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
      },
      "original": {
        "type": "tarball",
        "url": "https://github.com/NixOS/nixpkgs/archive/072a6db25e947df2f31aab9eccd0ab75d5b2da11.tar.gz"
      }
    },
    "nixpkgs-old": {
      "locked": {
        "lastModified": 1735563628,

@ -565,22 +478,6 @@
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1730768919,
        "narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixpkgs-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_3": {
      "locked": {
        "lastModified": 1738410390,
        "narHash": "sha256-xvTo0Aw0+veek7hvEVLzErmJyQkEcRk6PSR4zsRQFEc=",

@ -596,22 +493,6 @@
        "type": "github"
      }
    },
    "nixpkgs_4": {
      "locked": {
        "lastModified": 1740463929,
        "narHash": "sha256-4Xhu/3aUdCKeLfdteEHMegx5ooKQvwPHNkOgNCXQrvc=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "5d7db4668d7a0c6cc5fc8cf6ef33b008b2b1ed8b",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixos-24.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "parts": {
      "inputs": {
        "nixpkgs-lib": [

@ -637,7 +518,7 @@
    },
    "purescript-overlay": {
      "inputs": {
        "flake-compat": "flake-compat_4",
        "flake-compat": "flake-compat_3",
        "nixpkgs": [
          "nixops4-nixos",
          "nixops4",

@ -680,14 +561,11 @@
    },
    "root": {
      "inputs": {
        "flake-parts": "flake-parts",
        "git-hooks": "git-hooks",
        "nixops4": [
          "nixops4-nixos",
          "nixops4"
        ],
        "nixops4-nixos": "nixops4-nixos",
        "nixpkgs": "nixpkgs_4"
        "nixops4-nixos": "nixops4-nixos"
      }
    },
    "rust-overlay": {
98
flake.nix

@ -1,72 +1,52 @@
{
  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11"; # consumed by flake-parts
    flake-parts.url = "github:hercules-ci/flake-parts";
    git-hooks.url = "github:cachix/git-hooks.nix";
    nixops4.follows = "nixops4-nixos/nixops4";
    nixops4-nixos.url = "github:nixops4/nixops4-nixos";
  };

  outputs =
    inputs@{ flake-parts, ... }:
    let
      sources = import ./npins;
      inherit (sources) git-hooks agenix;
    in
    flake-parts.lib.mkFlake { inherit inputs; } {
      systems = [
        "x86_64-linux"
        "aarch64-linux"
        "x86_64-darwin"
        "aarch64-darwin"
      ];
    inputs:
    import ./mkFlake.nix inputs (
      { inputs, sources, ... }:
      {
        imports = [
          "${sources.git-hooks}/flake-module.nix"
          inputs.nixops4.modules.flake.default

      imports = [
        (import "${git-hooks}/flake-module.nix")
        inputs.nixops4.modules.flake.default
          ./deployment/flake-part.nix
          ./infra/flake-part.nix
          ./keys/flake-part.nix
          ./secrets/flake-part.nix
          ./services/tests/flake-part.nix
        ];

        ./deployment/flake-part.nix
        ./infra/flake-part.nix
      ];

      perSystem =
        {
          pkgs,
          lib,
          inputs',
          ...
        }:
        {
          formatter = pkgs.nixfmt-rfc-style;

          pre-commit.settings.hooks =
            let
              ## Add a directory here if pre-commit hooks shouldn't apply to it.
              optout = [ "npins" ];
              excludes = map (dir: "^${dir}/") optout;
              addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
            in
            addExcludes {
              nixfmt-rfc-style.enable = true;
              deadnix.enable = true;
              trim-trailing-whitespace.enable = true;
              shellcheck.enable = true;
        perSystem =
          {
            pkgs,
            lib,
            system,
            ...
          }:
          {
            checks = {
              panel = (import ./. { inherit sources system; }).tests.panel.basic;
            };
            formatter = pkgs.nixfmt-rfc-style;

            devShells.default = pkgs.mkShell {
              packages = [
                pkgs.npins
                pkgs.nil
                (pkgs.callPackage "${agenix}/pkgs/agenix.nix" { })
                pkgs.openssh
                pkgs.httpie
                pkgs.jq
                # exposing this env var as a hack to pass info in from form
                (inputs'.nixops4.packages.default.overrideAttrs {
                  impureEnvVars = [ "DEPLOYMENT" ];
                })
              ];
            pre-commit.settings.hooks =
              let
                ## Add a directory here if pre-commit hooks shouldn't apply to it.
                optout = [ "npins" ];
                excludes = map (dir: "^${dir}/") optout;
                addExcludes = lib.mapAttrs (_: c: c // { inherit excludes; });
              in
              addExcludes {
                nixfmt-rfc-style.enable = true;
                deadnix.enable = true;
                trim-trailing-whitespace.enable = true;
                shellcheck.enable = true;
              };
          };
      };
    };
}
    );
}
10
infra/.envrc
Normal file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# the shebang is ignored, but nice for editors

# shellcheck shell=bash
if type -P lorri &>/dev/null; then
  eval "$(lorri direnv --flake .)"
else
  echo 'while direnv evaluated .envrc, could not find the command "lorri" [https://github.com/nix-community/lorri]'
  use flake
fi
@ -1,98 +1,28 @@
# Infra
# service deployment

This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.
deploys [NixOS](https://nixos.org/) templates using [OpenTofu](https://opentofu.org/).

## Provisioning VMs with an initial configuration
## requirements

NOTE[Niols]: This is very manual and clunky. Two things will happen. In the near
future, I will improve the provisioning script to make this a bit less clunky.
In the far future, NixOps4 will be able to communicate with Proxmox directly and
everything will become much cleaner.
- [nix](https://nix.dev/)

1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
   above 100. For instance, `fedi117`.
## usage

2. Add a basic configuration for the machine. These typically go in
   `infra/machines/<name>/default.nix`. You can look at other `fediXXX` VMs to
   find inspiration. You probably do not need a `nixos.module` option at this
   point.
### development

3. Add a file for each of those VMs' public keys, eg.
   ```
   touch keys/systems/fedi117.pub
   ```
   Those files need to exist during provisioning, but their content matters only
   when updating the machines' configuration.

   FIXME: Remove this step by making the provisioning script not fail when the
   public key does not exist yet.

4. Run the provisioning script:
   ```
   sh infra/proxmox-provision.sh fedi117
   ```
   The script can take several ids at the same time. It requires some
   authentication options and provides several more. See `--help`.

5. (Optional) Add a DNS entry for the machine; for instance `fedi117.abundos.eu
   A 95.215.187.117`.

6. Grab the public host keys for the machines in question, and add them to the
   repository. For instance:
   ```
   ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
   ```

   FIXME: Make the provisioning script do that for us.

7. Regenerate the list of machines:
   ```
   sh infra/machines.md.sh
   ```
   Commit it with the machine's configuration, public key, etc.

8. At this point, the machine contains a very basic configuration that contains
   just enough for it to boot and be reachable. Go on to the next section to
   update the machine and put an actual configuration.

   FIXME: Figure out why the full configuration isn't on the machine at this
   point and fix it.

## Updating existing VM configurations

Their configuration can be updated via NixOps4. Run
before using other commands, if not using direnv:

```sh
nixops4 deployments list
nix-shell
```

to see the available deployments.
This should be done from the root of the repository,
otherwise NixOps4 will fail with something like:

```
nixops4 error: evaluation: error:
… while calling the 'getFlake' builtin

error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist, evaluation: error:
… while calling the 'getFlake' builtin

error: path '/nix/store/05nn7krhvi8wkcyl6bsysznlv60g5rrf-source/flake.nix' does not exist
```

Then, given a deployment (eg. `fedi200`), run
then to initialize, or after updating pins or TF providers:

```sh
nixops4 apply <deployment>
setup
```

Alternatively, to run the `default` deployment, which contains all the VMs, run
## implementing

```sh
nixops4 apply
```

## Removing an existing VM

See `infra/proxmox-remove.sh --help`.
proper documentation TODO.
until then, a reference implementation may be found in [`panel/`](https://git.fediversity.eu/Fediversity/Fediversity/src/branch/main/panel).
@ -5,8 +5,9 @@ let

in
{
  _class = "nixos";

  imports = [
    ./hardware.nix
    ./networking.nix
    ./users.nix
  ];

@ -22,4 +23,9 @@ in
  nix.extraOptions = ''
    experimental-features = nix-command flakes
  '';

  boot.loader = {
    systemd-boot.enable = true;
    efi.canTouchEfiVariables = true;
  };
}
@ -1,63 +1,64 @@
{ config, lib, ... }:

let
  inherit (lib) mkDefault;
  inherit (lib) mkDefault mkIf mkMerge;

in
{
  _class = "nixos";

  config = {
    services.openssh = {
      enable = true;
      settings.PasswordAuthentication = false;
    };

    networking = {
      hostName = config.fediversityVm.name;
      domain = config.fediversityVm.domain;
    networking = mkMerge [
      {
        hostName = config.fediversityVm.name;
        domain = config.fediversityVm.domain;

      ## REVIEW: Do we actually need that, considering that we have static IPs?
      useDHCP = mkDefault true;
        ## REVIEW: Do we actually need that, considering that we have static IPs?
        useDHCP = mkDefault true;

      interfaces = {
        eth0 = {
          ipv4 = {
            addresses = [
              {
                inherit (config.fediversityVm.ipv4) address prefixLength;
              }
            ];
          };
          ipv6 = {
            addresses = [
              {
                inherit (config.fediversityVm.ipv6) address prefixLength;
              }
            ];
          };
        ## Disable the default firewall and use nftables instead, with a custom
        ## Procolix-made ruleset.
        firewall.enable = false;
        nftables = {
          enable = true;
          rulesetFile = ./nftables-ruleset.nft;
        };
      }

      defaultGateway = {
        address = config.fediversityVm.ipv4.gateway;
        interface = "eth0";
      };
      defaultGateway6 = {
        address = config.fediversityVm.ipv6.gateway;
        interface = "eth0";
      };
      ## IPv4
      (mkIf config.fediversityVm.ipv4.enable {
        interfaces.${config.fediversityVm.ipv4.interface}.ipv4.addresses = [
          { inherit (config.fediversityVm.ipv4) address prefixLength; }
        ];
        defaultGateway = {
          address = config.fediversityVm.ipv4.gateway;
          interface = config.fediversityVm.ipv4.interface;
        };
        nameservers = [
          "95.215.185.6"
          "95.215.185.7"
        ];
      })

      nameservers = [
        "95.215.185.6"
        "95.215.185.7"
        "2a00:51c0::5fd7:b906"
        "2a00:51c0::5fd7:b907"
      ];

      firewall.enable = false;
      nftables = {
        enable = true;
        rulesetFile = ./nftables-ruleset.nft;
      };
    };
      ## IPv6
      (mkIf config.fediversityVm.ipv6.enable {
        interfaces.${config.fediversityVm.ipv6.interface}.ipv6.addresses = [
          { inherit (config.fediversityVm.ipv6) address prefixLength; }
        ];
        defaultGateway6 = {
          address = config.fediversityVm.ipv6.gateway;
          interface = config.fediversityVm.ipv6.interface;
        };
        nameservers = [
          "2a00:51c0::5fd7:b906"
          "2a00:51c0::5fd7:b907"
        ];
      })
    ];
  };
}
@ -1,5 +1,13 @@
{
  config,
  ...
}:
{
  _class = "nixos";

  users.users = {
    root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;

    procolix = {
      isNormalUser = true;
      extraGroups = [ "wheel" ];
43
infra/common/node.nix
Normal file

@ -0,0 +1,43 @@
{
  lib,
  config,
  ...
}:

let
  inherit (lib) attrValues elem mkDefault;
  inherit (lib.attrsets) concatMapAttrs optionalAttrs;
  inherit (lib.strings) removeSuffix;

  secretsPrefix = ../secrets;
  secrets = import (secretsPrefix + "/secrets.nix");
  keys = import ../keys;

in
{
  fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};

  ## The configuration of the machine. We strive to keep in this file only the
  ## options that really need to be injected from the resource. Everything else
  ## should go into the `./nixos` subdirectory.
  imports = [
    ../infra/common/options.nix
    ../infra/common/nixos
  ];

  ## Read all the secrets, filter the ones that are supposed to be readable
  ## with this host's public key, and add them correctly to the configuration
  ## as `age.secrets.<name>.file`.
  age.secrets = concatMapAttrs (
    name: secret:
    optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
      ${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
    }
  ) secrets;

  ## FIXME: switch root authentication to users with password-less sudo, see #24
  users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
    # allow our panel vm access to the test machines
    keys.panel
  ];
}
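For intuition, a sketch of what the `concatMapAttrs` call above expands to for a single secret; the secret name is invented for illustration:

```nix
# Assuming secrets.nix contains, among others:
#   { "panel-ssh-key.age".publicKeys = [ <this host's key> /* ... */ ]; }
# and this host's public key is in that list, the mapping reduces to:
{
  age.secrets."panel-ssh-key".file = ../secrets/panel-ssh-key.age;
}
```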
@ -6,6 +6,8 @@ let

in
{
  # `config` not set and imported from multiple places: no fixed module class

  options.fediversityVm = {

    ##########################################################################

@ -18,16 +20,13 @@ in
      '';
    };

    proxmox = mkOption {
      type = types.nullOr (
        types.enum [
          "procolix"
          "fediversity"
        ]
      );
    isFediversityVm = mkOption {
      type = types.bool;
      description = ''
        The Proxmox instance. This is used for provisioning only and should be
        set to `null` if the machine is not a VM.
        Whether the machine is a Fediversity VM or not. This is used to
        determine whether the machine should be provisioned via Proxmox or not.
        Machines that are _not_ Fediversity VMs could be physical machines, or
        VMs that live outside Fediversity, eg. on Procolix's Proxmox.
      '';
    };

@ -89,6 +88,17 @@ in
    };

    ipv4 = {
      enable = mkOption {
        default = true;
      };

      interface = mkOption {
        description = ''
          The interface that carries the machine's IPv4 network.
        '';
        default = "eth0";
      };

      address = mkOption {
        description = ''
          The IP address of the machine, version 4. It will be injected as a

@ -114,6 +124,17 @@ in
    };

    ipv6 = {
      enable = mkOption {
        default = true;
      };

      interface = mkOption {
        description = ''
          The interface that carries the machine's IPv6 network.
        '';
        default = "eth0";
      };

      address = mkOption {
        description = ''
          The IP address of the machine, version 6. It will be injected as a
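To illustrate the new per-protocol switches together with `isFediversityVm`, a hypothetical machine definition (the name and addresses are placeholders; `prefixLength` and other required options are omitted for brevity):

```nix
{
  fediversityVm = {
    name = "fedi117";
    isFediversityVm = true;
    ipv4 = {
      address = "95.215.187.117";
      gateway = "95.215.187.1";
    };
    # A machine without IPv6 connectivity can now opt out wholesale; the
    # corresponding interface, gateway and nameserver configuration is then
    # skipped by the mkIf branch in networking.nix.
    ipv6.enable = false;
  };
}
```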
@ -1,20 +1,20 @@
{ modulesPath, ... }:
{ ... }:

{
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
  _class = "nixos";

  ## FIXME: It would be nice, but the following leads to infinite recursion
  ## in the way we currently plug `sources` in.
  ##
  # imports = [
  #   "${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
  # ];

  boot = {
    loader = {
      systemd-boot.enable = true;
      efi.canTouchEfiVariables = true;
    };

    initrd = {
      availableKernelModules = [
        "ata_piix"
        "uhci_hcd"
        "virtio_pci"
        "virtio_scsi"
        "sd_mod"
        "sr_mod"
      ];
@ -1,6 +1,9 @@
{
  inputs,
  lib,
  config,
  keys,
  secrets,
  ...
}:

@ -8,15 +11,11 @@ let
  inherit (lib) attrValues elem mkDefault;
  inherit (lib.attrsets) concatMapAttrs optionalAttrs;
  inherit (lib.strings) removeSuffix;
  sources = import ../../npins;
  inherit (sources) nixpkgs agenix disko;

  secretsPrefix = ../../secrets;
  secrets = import (secretsPrefix + "/secrets.nix");
  keys = import ../../keys;

in
{
  _class = "nixops4Resource";

  imports = [ ./options.nix ];

  fediversityVm.hostPublicKey = mkDefault keys.systems.${config.fediversityVm.name};

@ -26,38 +25,39 @@ in
    hostPublicKey = config.fediversityVm.hostPublicKey;
  };

  inherit nixpkgs;
  inherit (inputs) nixpkgs;

  ## The configuration of the machine. We strive to keep in this file only the
  ## options that really need to be injected from the resource. Everything else
  ## should go into the `./nixos` subdirectory.
  nixos.module = {
    imports = [
      (import "${agenix}/modules/age.nix")
      (import "${disko}/module.nix")
      ./options.nix
      ./nixos
      ./proxmox-qemu-vm.nix
    ];

    ## Inject the shared options from the resource's `config` into the NixOS
    ## configuration.
    fediversityVm = config.fediversityVm;

    ## Read all the secrets, filter the ones that are supposed to be readable
    ## with this host's public key, and add them correctly to the configuration
    ## as `age.secrets.<name>.file`.
    ## Read all the secrets, filter the ones that are supposed to be readable with
    ## this host's public key, and create a mapping from `<name>.file` to the
    ## absolute path of the secret's file.
    age.secrets = concatMapAttrs (
      name: secret:
      optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) ({
        ${removeSuffix ".age" name}.file = secretsPrefix + "/${name}";
      })
    ) secrets;
      optionalAttrs (elem config.fediversityVm.hostPublicKey secret.publicKeys) {
        ${removeSuffix ".age" name}.file = secrets.rootPath + "/${name}";
      }
    ) secrets.mapping;

    ## FIXME: Remove direct root authentication once the NixOps4 NixOS provider
    ## supports users with password-less sudo.
    users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
      # allow our panel vm access to the test machines
      keys.panel
      # allow continuous deployment access
      keys.cd
    ];

  };
26
infra/common/shared.nix
Normal file

@ -0,0 +1,26 @@
{
  pkgs,
  config,
  ...
}:
let
  inherit (config.terraform) hostname domain initialUser;
in
{
  imports = [
    <disko/module.nix>
    <agenix/modules/age.nix>
    ../services/fediversity
    ./node.nix
  ];
  fediversityVm.name = hostname;
  fediversity = {
    inherit domain;
    temp.initialUser = {
      inherit (initialUser) username email displayName;
      # FIXME: disgusting, but nvm, this is going to be replaced by
      # proper central authentication at some point
      passwordFile = pkgs.writeText "password" initialUser.password;
    };
  };
}
34
infra/default.nix
Normal file

@ -0,0 +1,34 @@
{
  system ? builtins.currentSystem,
  sources ? import ../npins,
  pkgs ? import sources.nixpkgs { inherit system; },
}:
let
  inherit (pkgs) lib;
  setup = pkgs.writeScriptBin "setup" ''
    echo '${lib.strings.toJSON sources}' > .npins.json
    rm -f .terraform.lock.hcl
    rm -rf .terraform/
    tofu init
  '';
in
{
  # shell for testing TF directly
  shell = pkgs.mkShellNoCC {
    packages = [
      (import ./tf.nix { inherit lib pkgs; })
      pkgs.jaq
      setup
    ];
  };

  tests = pkgs.callPackage ./tests.nix { };

  # re-export inputs so they can be overridden granularly
  # (they can't be accessed from the outside any other way)
  inherit
    sources
    system
    pkgs
    ;
}
@ -1,49 +1,53 @@
|
|||
{
|
||||
inputs,
|
||||
lib,
|
||||
sources,
|
||||
keys,
|
||||
secrets,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
inherit (builtins) readDir readFile fromJSON;
|
||||
inherit (builtins) readDir;
|
||||
inherit (lib)
|
||||
attrNames
|
||||
mkOption
|
||||
evalModules
|
||||
filterAttrs
|
||||
mapAttrs'
|
||||
deepSeq
|
||||
;
|
||||
inherit (lib.attrsets) genAttrs;
|
||||
sources = import ../../npins;
|
||||
|
||||
## Given a machine's name and whether it is a test VM, make a resource module,
|
||||
## except for its missing provider. (Depending on the use of that resource, we
|
||||
## will provide a different one.)
|
||||
makeResourceModule =
|
||||
{ vmName, isTestVm }:
|
||||
{
|
||||
imports =
|
||||
[
|
||||
./common/resource.nix
|
||||
]
|
||||
++ (
|
||||
if isTestVm then
|
||||
[
|
||||
./test-machines/${vmName}
|
||||
{
|
||||
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
|
||||
# allow our panel vm access to the test machines
|
||||
(import ../keys).panel
|
||||
];
|
||||
}
|
||||
]
|
||||
else
|
||||
[
|
||||
./machines/${vmName}
|
||||
]
|
||||
);
|
||||
fediversityVm.name = vmName;
|
||||
commonResourceModule = {
|
||||
# TODO(@fricklerhandwerk): this is terrible but IMO we should just ditch
|
||||
# flake-parts and have our own data model for how the project is organised
|
||||
# internally
|
||||
_module.args = {
|
||||
inherit
|
||||
inputs
|
||||
keys
|
||||
secrets
|
||||
sources
|
||||
;
|
||||
};
|
||||
|
||||
## FIXME: It would be preferrable to have those `sources`-related imports in
|
||||
## the modules that use them. However, doing so triggers infinite recursions
|
||||
## because of the way we propagate `sources`. `sources` must be propagated by
|
||||
## means of `specialArgs`, but this requires a bigger change.
|
||||
nixos.module.imports = [
|
||||
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
|
||||
"${sources.agenix}/modules/age.nix"
|
||||
"${sources.disko}/module.nix"
|
||||
"${sources.home-manager}/nixos"
|
||||
];
|
||||
|
||||
imports = [
|
||||
./common/resource.nix
|
||||
];
|
||||
};
|
||||
|
||||
## Given a list of machine names, make a deployment with those machines'
|
||||
## configurations as resources.
|
||||
makeDeployment =
|
||||
|
|
@ -55,10 +59,8 @@ let
|
|||
type = providers.local.exec;
|
||||
imports = [
|
||||
inputs.nixops4-nixos.modules.nixops4Resource.nixos
|
||||
(makeResourceModule {
|
||||
inherit vmName;
|
||||
isTestVm = false;
|
||||
})
|
||||
commonResourceModule
|
||||
../machines/dev/${vmName}
|
||||
];
|
||||
});
|
||||
};
|
||||
|
|
@ -75,21 +77,29 @@ let
      fediversity = import ../services/fediversity;
    }
    {
      garageConfigurationResource = makeResourceModule {
        vmName = "test01";
        isTestVm = true;
      garageConfigurationResource = {
        imports = [
          commonResourceModule
          ../machines/operator/test01
        ];
      };
      mastodonConfigurationResource = makeResourceModule {
        vmName = "test06"; # somehow `test02` has a problem - use test06 instead
        isTestVm = true;
      mastodonConfigurationResource = {
        imports = [
          commonResourceModule
          ../machines/operator/test06 # somehow `test02` has a problem - use test06 instead
        ];
      };
      peertubeConfigurationResource = makeResourceModule {
        vmName = "test05";
        isTestVm = true;
      peertubeConfigurationResource = {
        imports = [
          commonResourceModule
          ../machines/operator/test05
        ];
      };
      pixelfedConfigurationResource = makeResourceModule {
        vmName = "test04";
        isTestVm = true;
      pixelfedConfigurationResource = {
        imports = [
          commonResourceModule
          ../machines/operator/test04
        ];
      };
    };
@ -102,56 +112,65 @@ let
    ## this is only needed to expose NixOS configurations for provisioning
    ## purposes, and eventually all of this should be handled by NixOps4.
    options = {
      nixos.module = mkOption { }; # NOTE: not just `nixos` otherwise merging will go wrong
      nixos.module = mkOption { type = lib.types.deferredModule; }; # NOTE: not just `nixos` otherwise merging will go wrong
      nixpkgs = mkOption { };
      ssh = mkOption { };
    };
  };

  makeResourceConfig =
    vm:
    { vmName, isTestVm }:
    (evalModules {
      modules = [
        nixops4ResourceNixosMockOptions
        (makeResourceModule vm)
        commonResourceModule
        (if isTestVm then ../machines/operator/${vmName} else ../machines/dev/${vmName})
      ];
    }).config;

  ## Given a VM name, make a NixOS configuration for this machine.
  makeConfiguration =
    isTestVm: vmName:
    let
      inherit (sources) nixpkgs;
    in
    import "${nixpkgs}/nixos" {
      modules = [
        (makeResourceConfig { inherit vmName isTestVm; }).nixos.module
      ];
    import "${sources.nixpkgs}/nixos" {
      configuration = (makeResourceConfig { inherit vmName isTestVm; }).nixos.module;
      system = "x86_64-linux";
    };

  makeVmOptions = isTestVm: vmName: {
    inherit ((makeResourceConfig { inherit vmName isTestVm; }).fediversityVm)
      proxmox
      vmId
      description

      sockets
      cores
      memory
      diskSize

      hostPublicKey
      unsafeHostPrivateKey
      ;
  };
  makeVmOptions =
    isTestVm: vmName:
    let
      config = (makeResourceConfig { inherit vmName isTestVm; }).fediversityVm;
    in
    if config.isFediversityVm then
      {
        inherit (config)
          vmId
          description
          sockets
          cores
          memory
          diskSize
          hostPublicKey
          unsafeHostPrivateKey
          ;
      }
    else
      null;

  listSubdirectories = path: attrNames (filterAttrs (_: type: type == "directory") (readDir path));

  machines = listSubdirectories ./machines;
  testMachines = listSubdirectories ./test-machines;
  machines = listSubdirectories ../machines/dev;

  nixosConfigurations =
    genAttrs machines (makeConfiguration false);
  vmOptions =
    filterAttrs (_: value: value != null) # Filter out non-Fediversity VMs
      (genAttrs machines (makeVmOptions false));

in
{
  _class = "flake";

  ## - Each normal or test machine gets a NixOS configuration.
  ## - Each normal or test machine gets a VM options entry.
  ## - Each normal machine gets a deployment.
@ -159,22 +178,24 @@ in
  ## - We add a “test” deployment with all test machines.
  nixops4Deployments = genAttrs machines makeDeployment' // {
    default = makeDeployment machines;
    test = makeTestDeployment (
      fromJSON (
        let
          env = builtins.getEnv "DEPLOYMENT";
        in
        if env != "" then
          env
        else
          builtins.trace "env var DEPLOYMENT not set, falling back to ./test-machines/configuration.json!" (readFile ./test-machines/configuration.json)
      )
    );
  };
  flake.nixosConfigurations =
    genAttrs machines (makeConfiguration false)
    // genAttrs testMachines (makeConfiguration true);
  flake.vmOptions =
    genAttrs machines (makeVmOptions false)
    // genAttrs testMachines (makeVmOptions true);
  flake = { inherit nixosConfigurations vmOptions; };

  perSystem =
    { pkgs, ... }:
    {
      checks =
        mapAttrs' (name: nixosConfiguration: {
          name = "nixosConfigurations-${name}";
          value = nixosConfiguration.config.system.build.toplevel;
        }) nixosConfigurations
        // mapAttrs' (name: vmOptions: {
          name = "vmOptions-${name}";
          ## Check that VM options builds/evaluates correctly. `deepSeq e1
          ## e2` evaluates `e1` strictly in depth before returning `e2`. We
          ## use this trick because checks need to be derivations, which VM
          ## options are not.
          value = deepSeq vmOptions pkgs.hello;
        }) vmOptions;
    };
}
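The mock-options pattern used by `makeResourceConfig` above is worth spelling out: `evalModules` is handed a stub module declaring just enough options (`nixos.module` as a deferred module) for a resource module to evaluate outside NixOps4, so its settings can be read for provisioning. A minimal sketch, with a hypothetical resource module `myResource` standing in for the repository's machine modules:

let
  lib = import <nixpkgs/lib>;
  # stub: declare only the option the resource module sets
  mock = {
    options.nixos.module = lib.mkOption { type = lib.types.deferredModule; };
  };
  # hypothetical stand-in for a module under ./machines/...
  myResource = {
    nixos.module = { ... }: { networking.hostName = "example"; };
  };
in
# evaluating yields the resource's settings without any NixOps4 provider
(lib.evalModules { modules = [ mock myResource ]; }).config.nixos.module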
159
infra/operator/main.tf
Normal file
@ -0,0 +1,159 @@
locals {
  system = "x86_64-linux"
  # dependency paths pre-calculated from npins
  pins = jsondecode(file("${path.root}/.npins.json"))
  # nix path: expose pins, use nixpkgs in flake commands (`nix run`)
  nix_path = "${join(":", [for name, path in local.pins : "${name}=${path}"])}:flake=${local.pins["nixpkgs"]}:flake"
  # user-facing applications
  application_configs = {
    # FIXME: wrap applications at the interface to grab them in one go?
    mastodon = {
      cfg      = var.mastodon
      hostname = "test06"
    }
    pixelfed = {
      cfg      = var.pixelfed
      hostname = "test04"
    }
    peertube = {
      cfg      = var.peertube
      hostname = "test05"
    }
  }
  # services shared between applications
  peripherals = { for name, inst in {
    garage = "test01"
    } : name => {
      hostname = inst
      cfg = {
        # enable if any user applications are enabled
        enable = anytrue([for _, app in local.application_configs : try(app.cfg.enable, false)])
      }
    }
  }
}

# hash of our code directory, used in dev to trigger re-deploy
# FIXME settle for pwd when in /nix/store?
# FIXME calculate separately to reduce false positives
data "external" "hash" {
  program = ["sh", "-c", "echo \"{\\\"hash\\\":\\\"$(nix-hash ..)\\\"}\""]
}

# TF resource to build and deploy NixOS instances.
resource "terraform_data" "nixos" {

  for_each = { for name, inst in merge(
    local.peripherals,
    local.application_configs,
  ) : name => inst if try(inst.cfg.enable, false) }

  # trigger rebuild/deploy if (FIXME?) any potentially used config/code changed,
  # preventing these (20+ s, the build being the bottleneck) when nothing changed.
  # terraform-nixos separates these so as to only deploy if the instantiation changed,
  # yet it still builds even then - which may be less costly when deploying on the remote.
  # having build/deploy in one resource reflects preferring to prevent no-op rebuilds
  # over preventing (with fewer false positives) no-op deployments,
  # as I could not find a way to prevent no-op rebuilds without merging them:
  # - generic resources cannot have outputs, while we want info from the instantiation (unless built on host?).
  # - `data` always runs, which is slow for deploy and especially build.
  triggers_replace = [
    data.external.hash.result,
    var.domain,
    var.initialUser,
    local.system,
    each.key,
    each.value,
  ]

  provisioner "local-exec" {
    # directory to run the script from. we use the TF project root dir,
    # here as a path relative to where TF is run.
    # note that absolute paths can cause false positives in triggers,
    # so they are generally discouraged in TF.
    working_dir = path.root
    environment = {
      # nix path used at build time; lets us refer to e.g. nixpkgs as `<nixpkgs>`
      NIX_PATH = local.nix_path
    }
    # TODO: refactor back to command="ignoreme" interpreter=concat([]) to protect sensitive data from error logs?
    # TODO: build on target?
    command = <<-EOF
      set -euo pipefail

      # INSTANTIATE
      command=(
        nix-instantiate
        --expr
        'let
          os = import <nixpkgs/nixos> {
            system = "${local.system}";
            configuration = {
              # note: the interpolations here are TF ones
              imports = [
                # shared NixOS config
                ${path.root}/shared.nix
                # FIXME: separate template options by service
                ${path.root}/options.nix
                # for service `mastodon` import `mastodon.nix`
                ${path.root}/../../machines/operator/${inst.hostname}/${each.key}.nix
                # FIXME: get VM details from TF
                ${path.root}/../../machines/operator/${inst.hostname}
              ];
              # nix path for debugging
              nix.nixPath = [ "${local.nix_path}" ];
              ## FIXME: switch root authentication to users with password-less sudo, see #24
              users.users.root.openssh.authorizedKeys.keys = let
                keys = import ../keys;
              in attrValues keys.contributors ++ [
                # allow our panel vm access to the test machines
                keys.panel
              ];
            } //
            # template parameters passed in from TF through JSON
            builtins.fromJSON "${replace(jsonencode({
              terraform = {
                domain      = var.domain
                hostname    = each.value.hostname
                initialUser = var.initialUser
              }
            }), "\"", "\\\"")}";
          };
        in
        # info we want to get back out
        {
          substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
          trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
          drv_path = os.config.system.build.toplevel.drvPath;
          out_path = os.config.system.build.toplevel;
        }'
      )
      # instantiate the config in /nix/store
      "$${command[@]}" -A out_path
      # get the other info
      json="$("$${command[@]}" --eval --strict --json)"

      # DEPLOY
      declare substituters trusted_public_keys drv_path
      # set our variables using the json object
      eval "export $(echo $json | jaq -r 'to_entries | map("\(.key)=\(.value)") | @sh')"
      # FIXME: de-hardcode domain
      host="root@${each.value.hostname}.abundos.eu" # FIXME: #24
      buildArgs=(
        --option extra-binary-caches https://cache.nixos.org/
        --option substituters $substituters
        --option trusted-public-keys $trusted_public_keys
      )
      sshOpts=(
        -o BatchMode=yes
        -o StrictHostKeyChecking=no
      )
      # get the realized derivation to deploy
      outPath=$(nix-store --realize "$drv_path" "$${buildArgs[@]}")
      # deploy the config by nix-copy-closure
      NIX_SSHOPTS="$${sshOpts[*]}" nix-copy-closure --to "$host" "$outPath" --gzip --use-substitutes
      # switch the remote host to the config
      ssh "$${sshOpts[@]}" "$host" "nix-env --profile /nix/var/nix/profiles/system --set $outPath; $outPath/bin/switch-to-configuration switch"
    EOF
  }
}
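To make the `local-exec` script above easier to follow: the heredoc evaluates a single Nix expression twice, once with `-A out_path` to instantiate the system in the store, and once with `--eval --strict --json` to extract metadata for the deploy step. Stripped of the TF interpolations, the expression has roughly this shape (the empty `configuration` is a placeholder, not the real imports):

let
  os = import <nixpkgs/nixos> {
    system = "x86_64-linux";
    # placeholder; the real script imports shared.nix, options.nix and the
    # per-service machine modules here
    configuration = { };
  };
in
# the attributes the deploy step consumes
{
  substituters = builtins.concatStringsSep " " os.config.nix.settings.substituters;
  trusted_public_keys = builtins.concatStringsSep " " os.config.nix.settings.trusted-public-keys;
  drv_path = os.config.system.build.toplevel.drvPath;
  out_path = os.config.system.build.toplevel;
}

Under `--eval --strict --json`, the `out_path` derivation serializes to its store path, which the shell part then realizes with `nix-store --realize` and ships with `nix-copy-closure`.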
54
infra/operator/options.nix
Normal file
@ -0,0 +1,54 @@
# TODO: could (part of) this be generated somehow? c.f. #275
{
  lib,
  ...
}:
let
  inherit (lib) types mkOption;
  inherit (types) str enum submodule;
in
{
  options.terraform = {
    domain = mkOption {
      type = enum [
        "fediversity.net"
      ];
      description = ''
        Apex domain under which the services will be deployed.
      '';
      default = "fediversity.net";
    };
    hostname = mkOption {
      type = str;
      description = ''
        Internal name of the host, e.g. test01.
      '';
    };
    initialUser = mkOption {
      description = ''
        Some services require an initial user to access them.
        This option sets the credentials for such an initial user.
      '';
      type = submodule {
        options = {
          displayName = mkOption {
            type = str;
            description = "Display name of the user";
          };
          username = mkOption {
            type = str;
            description = "Username for login";
          };
          email = mkOption {
            type = str;
            description = "User's email address";
          };
          password = mkOption {
            type = str;
            description = "Password for login";
          };
        };
      };
    };
  };
}
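For orientation, the JSON that `main.tf` splices into the configuration deserializes to a Nix value matching these options. With the defaults from `variables.tf` below, it looks like:

{
  terraform = {
    domain = "fediversity.net";
    hostname = "test06"; # set per instance by main.tf
    initialUser = {
      displayName = "Testy McTestface";
      username = "test";
      email = "test@test.com";
      password = "testtest";
    };
  };
}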
51
infra/operator/variables.tf
Normal file
@ -0,0 +1,51 @@
# TODO: (partially) generate, say from nix modules, c.f. #275

variable "domain" {
  type    = string
  default = "fediversity.net"
}

variable "mastodon" {
  type = object({
    enable = bool
  })
  default = {
    enable = false
  }
}

variable "pixelfed" {
  type = object({
    enable = bool
  })
  default = {
    enable = false
  }
}

variable "peertube" {
  type = object({
    enable = bool
  })
  default = {
    enable = false
  }
}

variable "initialUser" {
  type = object({
    displayName = string
    username    = string
    email       = string
    # TODO: mark (nested) credentials as sensitive
    # https://discuss.hashicorp.com/t/is-it-possible-to-mark-an-attribute-of-an-object-as-sensitive/24649/2
    password    = string
  })
  # FIXME: remove default when the form provides this value, see #285
  default = {
    displayName = "Testy McTestface"
    username    = "test"
    email       = "test@test.com"
    password    = "testtest"
  }
}
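On the TODO above (generating these variables from Nix): since the three per-service blocks are identical, one rough idea is rendering a `variables.tf.json` from a list of service names. The sketch below follows Terraform's JSON syntax, where type expressions are strings; treat it as an assumption, not something tested against this repository:

let
  lib = import <nixpkgs/lib>;
  # the services whose `enable` variables repeat above
  services = [ "mastodon" "pixelfed" "peertube" ];
in
builtins.toJSON {
  variable = lib.genAttrs services (_: {
    # type expressions are plain strings in .tf.json
    type = "object({ enable = bool })";
    default = { enable = false; };
  });
}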
1
infra/shell.nix
Normal file
@ -0,0 +1 @@
(import ./. { }).shell
26
infra/tests.nix
Normal file
@ -0,0 +1,26 @@
{ lib, pkgs }:
let
  defaults = {
    virtualisation = {
      memorySize = 2048;
      cores = 2;
    };
  };
  tf = pkgs.callPackage ./tf.nix { };
  tfEnv = pkgs.callPackage ./tf-env.nix { };
in
lib.mapAttrs (name: test: pkgs.testers.runNixOSTest (test // { inherit name; })) {
  tf-validate = {
    inherit defaults;
    nodes.server = {
      environment.systemPackages = [
        tf
        tfEnv
      ];
    };
    testScript = ''
      server.wait_for_unit("multi-user.target")
      server.succeed("${lib.getExe tf} -chdir='${tfEnv}/launch' validate")
    '';
  };
}
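Given its `{ lib, pkgs }` signature, `tests.nix` is presumably wired up via `callPackage`; a minimal usage sketch (the entry point is assumed, not shown in this diff):

let
  pkgs = import <nixpkgs> { };
  tests = pkgs.callPackage ./tests.nix { };
in
# building this attribute (e.g. `nix-build -A tf-validate`) runs the VM test
tests.tf-validate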
34
infra/tf-env.nix
Normal file
@ -0,0 +1,34 @@
{
  lib,
  pkgs,
  sources ? import ../npins,
  ...
}:
pkgs.stdenv.mkDerivation {
  name = "tf-repo";
  src =
    with lib.fileset;
    toSource {
      root = ../.;
      # don't copy ignored files
      fileset = intersection (gitTracked ../.) ../.;
    };
  buildInputs = [
    (import ./tf.nix { inherit lib pkgs; })
  ];
  buildPhase = ''
    runHook preBuild
    pushd infra
    # calculated pins
    echo '${lib.strings.toJSON sources}' > .npins.json
    # generate TF lock for nix's TF providers
    tofu init -input=false
    popd
    runHook postBuild
  '';
  installPhase = ''
    runHook preInstall
    cp -r . $out
    runHook postInstall
  '';
}
24
infra/tf.nix
Normal file
@ -0,0 +1,24 @@
# FIXME: use overlays so this gets imported just once?
{
  lib,
  pkgs,
  ...
}:
let
  tofuProvider =
    provider:
    provider.override (oldArgs: {
      provider-source-address =
        lib.replaceStrings [ "https://registry.terraform.io/providers" ] [ "registry.opentofu.org" ]
          oldArgs.homepage;
    });
  tf = pkgs.opentofu;
  tfPlugins = (
    p: [
      p.external
    ]
  );
in
# tf.withPlugins tfPlugins
# https://github.com/NixOS/nixpkgs/pull/358522
tf.withPlugins (p: pkgs.lib.lists.map tofuProvider (tfPlugins p))
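A usage sketch for the wrapper above, assuming it is consumed via `callPackage` (as `tests.nix` does): the result is an OpenTofu whose pinned providers have their source addresses rewritten for the OpenTofu registry.

let
  pkgs = import <nixpkgs> { };
  tofu = pkgs.callPackage ./tf.nix { };
in
pkgs.mkShell {
  # `tofu init` / `tofu validate` then find the providers offline
  packages = [ tofu ];
}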
1
keys/cd-ssh-key.pub
Normal file
@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMlsYTtMx3hFO8B5B8iHaXL2JKj9izHeC+/AMhIWXBPs cd-age
@ -35,4 +35,5 @@ in
  contributors = collectKeys ./contributors;
  systems = collectKeys ./systems;
  panel = removeTrailingWhitespace (readFile ./panel-ssh-key.pub);
  cd = removeTrailingWhitespace (readFile ./cd-ssh-key.pub);
}
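`collectKeys` is defined earlier in keys/default.nix, outside this hunk; for readers without that context, a plausible sketch of its shape — an assumption, not the repository's actual definition — is:

let
  inherit (builtins) attrNames listToAttrs readDir readFile replaceStrings;
  # hypothetical: one attribute per *.pub file, named after the file
  collectKeys =
    dir:
    listToAttrs (
      map (file: {
        name = replaceStrings [ ".pub" ] [ "" ] file;
        value = readFile (dir + "/${file}");
      }) (attrNames (readDir dir))
    );
in
collectKeys ./contributors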
5
keys/flake-part.nix
Normal file
@ -0,0 +1,5 @@
{
  _class = "flake";

  _module.args.keys = import ./.;
}
1
keys/systems/forgejo-ci.pub
Normal file
@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFXQW5fxJoNY9wtTMsNExgbAbvyljIRGBLjY+USh/0A
4
machines/README.md
Normal file
@ -0,0 +1,4 @@
# Machines

This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "fedi200";
    isFediversityVm = true;
    vmId = 200;
    proxmox = "fediversity";
    description = "Testing machine for Hans";

    domain = "abundos.eu";
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "fedi201";
    isFediversityVm = true;
    vmId = 201;
    proxmox = "fediversity";
    description = "FediPanel";

    domain = "abundos.eu";
@ -6,6 +6,8 @@ let
  name = "panel";
in
{
  _class = "nixos";

  imports = [
    (import ../../../panel { }).module
  ];
@ -37,6 +39,24 @@ in
    enable = true;
    production = true;
    domain = "demo.fediversity.eu";
    # FIXME: make it work without this duplication
    settings =
      let
        cfg = config.services.${name};
      in
      {
        STATIC_ROOT = "/var/lib/${name}/static";
        DEBUG = false;
        ALLOWED_HOSTS = [
          cfg.domain
          cfg.host
          "localhost"
          "[::1]"
        ];
        CSRF_TRUSTED_ORIGINS = [ "https://${cfg.domain}" ];
        COMPRESS_OFFLINE = true;
        LIBSASS_OUTPUT_STYLE = "compressed";
      };
    secrets = {
      SECRET_KEY = config.age.secrets.panel-secret-key.path;
    };
72
machines/dev/forgejo-ci/default.nix
Normal file
@ -0,0 +1,72 @@
{ lib, ... }:

let
  inherit (lib) mkDefault mkForce;
in

{
  _class = "nixops4Resource";

  # NOTE: This needs an SSH config entry `forgejo-ci` to locate and access the
  # machine. This is because different people access the machine in different
  # ways (eg. via a proxy vs. via Procolix's VPN). This might look like:
  #
  #     Host forgejo-ci
  #       HostName 45.142.234.216
  #       HostKeyAlias forgejo-ci
  #
  # The `HostKeyAlias` statement is crucial. Without it, deployment will fail
  # with the SSH error “Host key verification failed”.
  ssh.host = mkForce "forgejo-ci";

  fediversityVm = {
    name = "forgejo-ci";
    domain = "procolix.com";
    isFediversityVm = false;

    ipv4 = {
      interface = "enp1s0f0";
      address = "192.168.201.65";
      prefixLength = 24;
      gateway = "192.168.201.1";
    };
    ipv6.enable = false;
  };

  nixos.module =
    { config, ... }:
    {
      _class = "nixos";

      imports = [
        ./forgejo-actions-runner.nix
      ];

      hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;

      networking = {
        nftables.enable = mkForce false;
        hostId = "1d6ea552";
      };

      ## NOTE: This is a physical machine, so it is not covered by disko
      fileSystems."/" = lib.mkForce {
        device = "rpool/root";
        fsType = "zfs";
      };

      fileSystems."/home" = {
        device = "rpool/home";
        fsType = "zfs";
      };

      fileSystems."/boot" = lib.mkForce {
        device = "/dev/disk/by-uuid/50B2-DD3F";
        fsType = "vfat";
        options = [
          "fmask=0077"
          "dmask=0077"
        ];
      };
    };
}
47
machines/dev/forgejo-ci/forgejo-actions-runner.nix
Normal file
@ -0,0 +1,47 @@
{ pkgs, config, ... }:

{
  _class = "nixos";

  services.gitea-actions-runner = {
    package = pkgs.forgejo-actions-runner;

    instances.default = {
      enable = true;

      name = config.networking.fqdn;
      url = "https://git.fediversity.eu";
      tokenFile = config.age.secrets.forgejo-runner-token.path;

      settings = {
        log.level = "info";
        runner = {
          file = ".runner";
          # Take only 1 job at a time to avoid clashing NixOS tests, see #362
          capacity = 1;
          timeout = "3h";
          insecure = false;
          fetch_timeout = "5s";
          fetch_interval = "2s";
        };
      };

      ## This runner supports Docker (with a default Ubuntu image) and native
      ## modes. In native mode, it contains a few default packages.
      labels = [
        "docker:docker://node:16-bullseye"
        "native:host"
      ];

      hostPackages = with pkgs; [
        bash
        git
        nix
        nodejs
      ];
    };
  };

  ## For the Docker mode of the runner.
  virtualisation.docker.enable = true;
}
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "vm02116";
    isFediversityVm = false;
    vmId = 2116;
    proxmox = "procolix";
    description = "Forgejo";

    ipv4.address = "185.206.232.34";
@ -5,6 +5,8 @@ let

in
{
  _class = "nixos";

  services.forgejo = {
    enable = true;
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "vm02187";
    isFediversityVm = false;
    vmId = 2187;
    proxmox = "procolix";
    description = "Wiki";

    ipv4.address = "185.206.232.187";
@ -1,6 +1,8 @@
{ config, ... }:

{
  _class = "nixos";

  services.phpfpm.pools.mediawiki.phpOptions = ''
    upload_max_filesize = 1024M;
    post_max_size = 1024M;
@ -7,9 +7,10 @@ Currently, this repository keeps track of the following VMs:

Machine | Proxmox | Description
--------|---------|-------------
[`fedi200`](./fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./fedi201) | fediversity | FediPanel
[`vm02116`](./vm02116) | procolix | Forgejo
[`vm02187`](./vm02187) | procolix | Wiki
[`fedi200`](./dev/fedi200) | fediversity | Testing machine for Hans
[`fedi201`](./dev/fedi201) | fediversity | FediPanel
[`vm02116`](./dev/vm02116) | procolix | Forgejo
[`vm02187`](./dev/vm02187) | procolix | Wiki
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |

This table excludes all machines with names starting with `test`.
@ -20,7 +20,7 @@ vmOptions=$(
  cd ..
  nix eval \
    --impure --raw --expr "
      builtins.toJSON (builtins.getFlake (builtins.toString ./.)).vmOptions
      builtins.toJSON (builtins.getFlake (builtins.toString ../.)).vmOptions
    " \
    --log-format raw --quiet
)
@ -32,11 +32,12 @@ for machine in $(echo "$vmOptions" | jq -r 'keys[]'); do
    description=$(echo "$vmOptions" | jq -r ".$machine.description" | head -n 1)

    # shellcheck disable=SC2016
    printf '[`%s`](./%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
    printf '[`%s`](./dev/%s) | %s | %s\n' "$machine" "$machine" "$proxmox" "$description"
  fi
done

cat <<\EOF
| `forgejo-ci` | n/a (physical) | Forgejo actions runner |

This table excludes all machines with names starting with `test`.
EOF
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test01";
    isFediversityVm = true;
    vmId = 7001;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
34
machines/operator/test01/garage.nix
Normal file
@ -0,0 +1,34 @@
{ pkgs, ... }:
let
  ## NOTE: All of these secrets are publicly available in this source file
  ## and will end up in the Nix store. We don't care as they are only ever
  ## used for testing anyway.
  ##
  ## FIXME: Generate and store in NixOps4's state.
  mastodonS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
    };
  peertubeS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
    };
  pixelfedS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
    };
in
{
  fediversity = {
    garage.enable = true;
    pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
    mastodon = mastodonS3KeyConfig { inherit pkgs; };
    peertube = peertubeS3KeyConfig { inherit pkgs; };
  };
}
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test02";
    isFediversityVm = true;
    vmId = 7002;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test03";
    isFediversityVm = true;
    vmId = 7003;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test04";
    isFediversityVm = true;
    vmId = 7004;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
16
machines/operator/test04/pixelfed.nix
Normal file
@ -0,0 +1,16 @@
{ pkgs, ... }:
let
  pixelfedS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
    };
in
{
  fediversity = {
    pixelfed = pixelfedS3KeyConfig { inherit pkgs; } // {
      enable = true;
    };
  };
}
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test05";
    isFediversityVm = true;
    vmId = 7005;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
20
machines/operator/test05/peertube.nix
Normal file
@ -0,0 +1,20 @@
{ pkgs, ... }:
let
  peertubeS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK1f9feea9960f6f95ff404c9b";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7295c4201966a02c2c3d25b5cea4a5ff782966a2415e3a196f91924631191395";
    };
in
{
  fediversity = {
    peertube = peertubeS3KeyConfig { inherit pkgs; } // {
      enable = true;
      ## NOTE: Only ever used for testing anyway.
      ##
      ## FIXME: Generate and store in NixOps4's state.
      secretsFile = pkgs.writeText "secret" "574e093907d1157ac0f8e760a6deb1035402003af5763135bae9cbd6abe32b24";
    };
  };
}
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test06";
    isFediversityVm = true;
    vmId = 7006;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
17
machines/operator/test06/mastodon.nix
Normal file
@ -0,0 +1,17 @@
{ pkgs, ... }:
let
  mastodonS3KeyConfig =
    { pkgs, ... }:
    {
      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GK3515373e4c851ebaad366558";
      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "7d37d093435a41f2aab8f13c19ba067d9776c90215f56614adad6ece597dbb34";
    };
in
{
  fediversity = {
    mastodon = mastodonS3KeyConfig { inherit pkgs; } // {
      enable = true;
    };
    temp.cores = 1; # FIXME: should come from NixOps4 eventually
  };
}
@ -1,7 +1,10 @@
{
  _class = "nixops4Resource";

  fediversityVm = {
    name = "test11";
    isFediversityVm = true;
    vmId = 7011;
    proxmox = "fediversity";

    hostPublicKey = builtins.readFile ./ssh_host_ed25519_key.pub;
    unsafeHostPrivateKey = builtins.readFile ./ssh_host_ed25519_key;
Some files were not shown because too many files have changed in this diff.