forked from Fediversity/Fediversity

Compare commits: main...npins-verb (47 commits)
Commits: 0fb2ef22ce, e790a4450f, dabeae4695, 4495da07ba, 4ae4190415, d9a20a1fa2, 550f5cb584, a435b5447a, 5d11766c2d, 8278f6781a, 5ccffb0621, 7655f6a38e, fe789c1819, 0a615fa961, aa0ead9c7c, 8fab923899, 7eeeee4543, a5ec137b4f, 16e1f9a6a3, 2cee7b315b, 50f3d01aec, 45e97e8339, ebd79d2d5e, 3622dc817a, 5cf5a121e4, b6eebd577f, 5140fe5935, 761d74a109, 8ff5b87d00, b9d406f437, 6755385536, 96e2f4ac5d, ac68a23805, d0aaf18a70, 1b03238b06, 2cef9589db, 14a5e057b9, f11f28eda8, fabb42e05d, 0b809bf866, cf35f423e1, 224cbedc1e, 953bbc7349, 3a4d0679d5, a528c4e148, 8e02684bf8, 765183cd0d
28 changed files with 838 additions and 42 deletions
.forgejo/workflows/cache.yaml (new file, 25 lines):

name: cache-build

on:
  workflow_dispatch: # allows manual triggering
  push:
    branches:
      # - main

jobs:
  deploy:
    runs-on: native
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - run: attic login fediversity https://attic.fediversity.net ${{ secrets.ATTIC_PUSH_KEY }} && attic use demo

      - name: Set up SSH key to access age secrets
        run: |
          env
          mkdir -p ~/.ssh
          echo "${{ secrets.CD_SSH_KEY }}" > ~/.ssh/id_ed25519
          chmod 600 ~/.ssh/id_ed25519

      - name: Deploy
      - run: attic push demo $(nix-build)
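The same push can be reproduced by hand with the attic client; a rough sketch using the commands from the workflow above (a real push token with rights on the `demo` cache is assumed):

$ attic login fediversity https://attic.fediversity.net <push-token>
$ attic use demo
$ attic push demo $(nix-build)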
@@ -10,13 +10,13 @@ on:
 
 jobs:
   check-pre-commit:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix-build -A tests
 
   check-data-model:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
@@ -28,31 +28,31 @@ jobs:
       - run: nix build .#checks.x86_64-linux.test-mastodon-service -L
 
   check-peertube:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix build .#checks.x86_64-linux.test-peertube-service -L
 
   check-panel:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
-      - run: nix-build -A tests.panel
+      - run: nix-build panel -A tests
 
   check-deployment-basic:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix build .#checks.x86_64-linux.deployment-basic -L
 
   check-deployment-cli:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix build .#checks.x86_64-linux.deployment-cli -L
 
   check-deployment-panel:
-    runs-on: native
+    runs-on: nix
     steps:
       - uses: actions/checkout@v4
       - run: nix build .#checks.x86_64-linux.deployment-panel -L
@@ -8,12 +8,12 @@ on:
 
 jobs:
   lockfile:
-    runs-on: native
+    runs-on: nix
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4
       - name: Update pins
-        run: nix-shell --run "npins update"
+        run: nix-shell --run "npins --verbose update"
       - name: Create PR
        uses: https://github.com/KiaraGrouwstra/gitea-create-pull-request@f9f80aa5134bc5c03c38f5aaa95053492885b397
        with:
@@ -59,12 +59,14 @@ in
 in
 [
   pkgs.npins
+  pkgs.attic-client
   pkgs.nil
   (pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { })
   pkgs.openssh
   pkgs.httpie
   pkgs.jq
   pkgs.nix-unit
+  pkgs.attic-client
   test-loop
   nixops4.packages.${system}.default
 ];
@@ -4,6 +4,7 @@
     "mastodon"
     "peertube"
     "pixelfed"
+    "attic"
   ];
   pathToRoot = ../../..;
   pathFromRoot = ./.;
deployment/check/cli/flake-part.nix (new file, 91 lines):

{
  self,
  inputs,
  lib,
  sources,
  ...
}:

let
  inherit (builtins) fromJSON readFile listToAttrs;

  targetMachines = [
    "garage"
    "mastodon"
    "peertube"
    "pixelfed"
    "attic"
  ];
  pathToRoot = /. + (builtins.unsafeDiscardStringContext self);
  pathFromRoot = ./.;
  enableAcme = true;

in
{
  _class = "flake";

  perSystem =
    { pkgs, ... }:
    {
      checks.deployment-cli = pkgs.testers.runNixOSTest {
        imports = [
          ../common/nixosTest.nix
          ./nixosTest.nix
        ];
        _module.args = { inherit inputs sources; };
        inherit
          targetMachines
          pathToRoot
          pathFromRoot
          enableAcme
          ;
      };
    };

  nixops4Deployments =
    let
      makeTargetResource = nodeName: {
        imports = [ ../common/targetResource.nix ];
        _module.args = { inherit inputs sources; };
        inherit
          nodeName
          pathToRoot
          pathFromRoot
          enableAcme
          ;
      };

      ## The deployment function - what we are here to test!
      ##
      ## TODO: Modularise `deployment/default.nix` to get rid of the nested
      ## function calls.
      makeTestDeployment =
        args:
        (import ../..)
          {
            inherit lib;
            inherit (inputs) nixops4 nixops4-nixos;
            fediversity = import ../../../services/fediversity;
          }
          (listToAttrs (
            map (nodeName: {
              name = "${nodeName}ConfigurationResource";
              value = makeTargetResource nodeName;
            }) targetMachines
          ))
          (fromJSON (readFile ../../configuration.sample.json) // args);

    in
    {
      check-deployment-cli-nothing = makeTestDeployment { };

      check-deployment-cli-mastodon-pixelfed = makeTestDeployment {
        mastodon.enable = true;
        pixelfed.enable = true;
      };

      check-deployment-cli-peertube = makeTestDeployment {
        peertube.enable = true;
      };
    };
}
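The `deployment-cli` check defined above is the same one the updated workflow invokes; a sketch of running it locally from the repository root:

$ nix build .#checks.x86_64-linux.deployment-cli -L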
@@ -45,6 +45,8 @@ in
     peertube.inputDerivation
     gixy
     gixy.inputDerivation
+    shellcheck
+    shellcheck.inputDerivation
   ];
 
   system.extraDependenciesFromModule = {
@@ -68,6 +70,11 @@ in
       s3AccessKeyFile = dummyFile;
       s3SecretKeyFile = dummyFile;
     };
+    attic = {
+      enable = true;
+      s3AccessKeyFile = dummyFile;
+      s3SecretKeyFile = dummyFile;
+    };
     temp.cores = 1;
     temp.initialUser = {
       username = "dummy";
@@ -92,6 +99,7 @@ in
   nodes.mastodon.virtualisation.memorySize = 4 * 1024;
   nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
   nodes.peertube.virtualisation.memorySize = 5 * 1024;
+  nodes.attic.virtualisation.memorySize = 2 * 1024;
 
   ## FIXME: The test of presence of the services are very simple: we only
   ## check that there is a systemd service of the expected name on the
@@ -106,6 +114,7 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with no services enabled"):
         deployer.succeed("nixops4 apply check-deployment-cli-nothing --show-trace --no-interactive 1>&2")
@@ -115,6 +124,7 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with Mastodon and Pixelfed enabled"):
         deployer.succeed("nixops4 apply check-deployment-cli-mastodon-pixelfed --show-trace --no-interactive 1>&2")
@@ -124,6 +134,7 @@ in
     mastodon.succeed("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with only Peertube enabled"):
         deployer.succeed("nixops4 apply check-deployment-cli-peertube --show-trace --no-interactive 1>&2")
@@ -133,5 +144,6 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.succeed("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
   '';
 }
@@ -40,7 +40,7 @@ in
   ## default. These values have been trimmed down to the gigabyte.
   ## Memory use is expected to be dominated by the NixOS evaluation,
   ## which happens on the deployer.
-  memorySize = 4 * 1024;
+  memorySize = 5 * 1024;
   diskSize = 4 * 1024;
   cores = 2;
 };
@@ -59,8 +59,10 @@ in
     inputs.nixpkgs
 
     sources.flake-parts
+    sources.nixpkgs
     sources.flake-inputs
     sources.git-hooks
+    sources.vars
 
     pkgs.stdenv
     pkgs.stdenvNoCC
@@ -4,6 +4,7 @@
     "mastodon"
     "peertube"
     "pixelfed"
+    "attic"
   ];
   pathToRoot = ../../..;
   pathFromRoot = ./.;
@@ -33,6 +33,7 @@ let
       enableMastodon,
       enablePeertube,
       enablePixelfed,
+      enableAttic,
     }:
     hostPkgs.writers.writePython3Bin "interact-with-panel"
       {
@@ -94,6 +95,7 @@ let
         checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'mastodon.enable']"), ${toPythonBool enableMastodon})
         checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'peertube.enable']"), ${toPythonBool enablePeertube})
         checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'pixelfed.enable']"), ${toPythonBool enablePixelfed})
+        checkbox_set(driver.find_element(By.XPATH, "//input[@name = 'attic.enable']"), ${toPythonBool enableAttic})
 
         print("Start deployment...")
         driver.find_element(By.XPATH, "//button[@id = 'deploy-button']").click()
@@ -208,6 +210,11 @@ in
       s3AccessKeyFile = dummyFile;
       s3SecretKeyFile = dummyFile;
     };
+    attic = {
+      enable = true;
+      s3AccessKeyFile = dummyFile;
+      s3SecretKeyFile = dummyFile;
+    };
     temp.cores = 1;
     temp.initialUser = {
       username = "dummy";
@@ -253,6 +260,7 @@ in
   nodes.mastodon.virtualisation.memorySize = 4 * 1024;
   nodes.pixelfed.virtualisation.memorySize = 4 * 1024;
   nodes.peertube.virtualisation.memorySize = 5 * 1024;
+  nodes.attic.virtualisation.memorySize = 4 * 1024;
 
   ## FIXME: The test of presence of the services are very simple: we only
   ## check that there is a systemd service of the expected name on the
@@ -325,6 +333,7 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with no services enabled"):
         client.succeed("${
@@ -333,6 +342,7 @@ in
           enableMastodon = false;
           enablePeertube = false;
           enablePixelfed = false;
+          enableAttic = false;
         }
       }/bin/interact-with-panel >&2")
 
@@ -341,6 +351,7 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with Mastodon and Pixelfed enabled"):
         client.succeed("${
@@ -349,6 +360,7 @@ in
           enableMastodon = true;
           enablePeertube = false;
           enablePixelfed = true;
+          enableAttic = false;
         }
       }/bin/interact-with-panel >&2")
 
@@ -357,6 +369,7 @@ in
     mastodon.succeed("systemctl status mastodon-web.service")
     peertube.fail("systemctl status peertube.service")
     pixelfed.succeed("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
 
     with subtest("Run deployment with only Peertube enabled"):
         client.succeed("${
@@ -365,6 +378,7 @@ in
           enableMastodon = false;
           enablePeertube = true;
           enablePixelfed = false;
+          enableAttic = false;
         }
       }/bin/interact-with-panel >&2")
 
@@ -373,5 +387,6 @@ in
     mastodon.fail("systemctl status mastodon-web.service")
     peertube.succeed("systemctl status peertube.service")
     pixelfed.fail("systemctl status phpfpm-pixelfed.service")
+    attic.fail("systemctl status atticd.service")
   '';
 }
@@ -3,6 +3,7 @@
   "mastodon": { "enable": false },
   "peertube": { "enable": false },
   "pixelfed": { "enable": false },
+  "attic": { "enable": false },
   "initialUser": {
     "displayName": "Testy McTestface",
     "username": "test",
@@ -24,6 +24,7 @@
   mastodonConfigurationResource,
   peertubeConfigurationResource,
   pixelfedConfigurationResource,
+  atticConfigurationResource,
 }:
 
 ## From the hosting provider's perspective, the function is meant to be
@@ -55,6 +56,7 @@ let
     mastodon = nonNull panelConfigNullable.mastodon { enable = false; };
     peertube = nonNull panelConfigNullable.peertube { enable = false; };
     pixelfed = nonNull panelConfigNullable.pixelfed { enable = false; };
+    attic = nonNull panelConfigNullable.attic { enable = false; };
   };
 in
 
@@ -107,6 +109,13 @@ in
       s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKb5615457d44214411e673b7b";
       s3SecretKeyFile = pkgs.writeText "s3SecretKey" "5be6799a88ca9b9d813d1a806b64f15efa49482dbe15339ddfaf7f19cf434987";
     };
+  atticS3KeyConfig =
+    { pkgs, ... }:
+    {
+      # REVIEW: how were these generated above? how do i add one?
+      s3AccessKeyFile = pkgs.writeText "s3AccessKey" "GKaaaaaaaaaaaaaaaaaaaaaaaa";
+      s3SecretKeyFile = pkgs.writeText "s3SecretKey" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+    };
 
   makeConfigurationResource = resourceModule: config: {
     type = providers.local.exec;
@@ -140,13 +149,14 @@ in
   {
     garage-configuration = makeConfigurationResource garageConfigurationResource (
       { pkgs, ... }:
-      mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable) {
+      mkIf (cfg.mastodon.enable || cfg.peertube.enable || cfg.pixelfed.enable || cfg.attic.enable) {
         fediversity = {
           inherit (cfg) domain;
           garage.enable = true;
           pixelfed = pixelfedS3KeyConfig { inherit pkgs; };
           mastodon = mastodonS3KeyConfig { inherit pkgs; };
           peertube = peertubeS3KeyConfig { inherit pkgs; };
+          attic = atticS3KeyConfig { inherit pkgs; };
         };
       }
     );
@@ -213,6 +223,25 @@ in
         };
       }
     );
+
+    attic-configuration = makeConfigurationResource atticConfigurationResource (
+      { pkgs, ... }:
+      mkIf cfg.attic.enable {
+        fediversity = {
+          inherit (cfg) domain;
+          temp.initialUser = {
+            inherit (cfg.initialUser) username email displayName;
+            # FIXME: disgusting, but nvm, this is going to be replaced by
+            # proper central authentication at some point
+            passwordFile = pkgs.writeText "password" cfg.initialUser.password;
+          };
+
+          attic = atticS3KeyConfig { inherit pkgs; } // {
+            enable = true;
+          };
+        };
+      }
+    );
   };
 };
 }
@@ -71,6 +71,19 @@ in
       });
     default = null;
   };
+  attic = mkOption {
+    description = ''
+      Configuration for the Attic service
+    '';
+    type =
+      with types;
+      nullOr (submodule {
+        options = {
+          enable = lib.mkEnableOption "Attic";
+        };
+      });
+    default = null;
+  };
   initialUser = mkOption {
     description = ''
       Some services require an initial user to access them.
@@ -24,6 +24,14 @@ in
     experimental-features = nix-command flakes
   '';
 
+  nix.settings = {
+    substituters = [
+      "https://attic.fediversity.net/demo"
+    ];
+    trusted-public-keys = [
+      "demo:N3CAZ049SeBVqBM+OnhLMrxWJ9altbD/aoJtHrY19KM="
+    ];
+  };
   boot.loader = {
     systemd-boot.enable = true;
     efi.canTouchEfiVariables = true;
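A quick way to confirm from such a host that the substituter configured above is reachable is to fetch the standard binary-cache metadata document (a sketch; the path is the generic nix-cache-info endpoint that Nix itself queries):

$ curl -s https://attic.fediversity.net/demo/nix-cache-info
# should return the cache's nix-cache-info document (StoreDir, priority, ...)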
@@ -43,7 +43,7 @@ table inet filter {
     ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, echo-reply, echo-request, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert, packet-too-big, parameter-problem, time-exceeded } accept
 
     # open tcp ports: sshd (22)
-    tcp dport {ssh} accept
+    tcp dport ssh accept
 
     # open tcp ports: snmp (161)
     ip saddr $snmp_allow udp dport {snmp} accept
@@ -6,7 +6,7 @@
   _class = "nixos";
 
   users.users = {
-    root.openssh.authorizedKeys.keys = config.user.users.procolix.openssh.authorizedKeys.keys;
+    root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
 
     procolix = {
       isNormalUser = true;
@@ -32,9 +32,11 @@ in
   ## options that really need to be injected from the resource. Everything else
   ## should go into the `./nixos` subdirectory.
   nixos.module = {
-    imports = [
-      "${sources.agenix}/modules/age.nix"
-      "${sources.disko}/module.nix"
+    imports = with sources; [
+      "${agenix}/modules/age.nix"
+      "${disko}/module.nix"
+      "${vars}/options.nix"
+      "${vars}/backends/on-machine.nix"
       ./options.nix
       ./nixos
     ];
@@ -126,6 +126,10 @@ let
       vmName = "test04";
       isTestVm = true;
     };
+    atticConfigurationResource = makeResourceModule {
+      vmName = "test12";
+      isTestVm = true;
+    };
   };
 
   nixops4ResourceNixosMockOptions = {
@@ -16,10 +16,4 @@
       gateway = "2a00:51c0:13:1305::1";
     };
   };
-
-  nixos.module = {
-    imports = [
-      ../../../infra/common/proxmox-qemu-vm.nix
-    ];
-  };
 }
@@ -1,19 +1,86 @@
-{ pkgs, config, ... }:
+{
+  pkgs,
+  lib,
+  config,
+  ...
+}:
+let
+  system = builtins.currentSystem;
+  sources = import ../../../npins;
+  packages =
+    let
+      inherit (import sources.flake-inputs) import-flake;
+      inherit ((import-flake { src = ../../..; }).inputs) nixops4;
+    in
+    [
+      pkgs.attic-client
+      pkgs.coreutils
+      pkgs.findutils
+      pkgs.gnugrep
+      pkgs.gawk
+      pkgs.git
+      pkgs.nix
+      pkgs.bash
+      pkgs.jq
+      pkgs.nodejs
+      pkgs.npins
+      nixops4.packages.${system}.default
+    ];
+  storeDeps = pkgs.runCommand "store-deps" { } ''
+    mkdir -p $out/bin
+    for dir in ${toString packages}; do
+      for bin in "$dir"/bin/*; do
+        ln -s "$bin" "$out/bin/$(basename "$bin")"
+      done
+    done
+    # Add SSL CA certs
+    mkdir -p $out/etc/ssl/certs
+    cp -a "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" $out/etc/ssl/certs/ca-bundle.crt
+  '';
+  numInstances = 5;
+in
 
 {
   _class = "nixos";
 
+  imports = with sources; [
+    "${home-manager}/nixos"
+    "${vars}/options.nix"
+    "${vars}/backends/on-machine.nix"
+  ];
+
   services.gitea-actions-runner = {
     package = pkgs.forgejo-actions-runner;
-    instances.default = {
+    instances = lib.genAttrs (builtins.genList (n: "nix${builtins.toString n}") numInstances) (_: {
       enable = true;
 
       name = config.networking.fqdn;
       url = "https://git.fediversity.eu";
       tokenFile = config.age.secrets.forgejo-runner-token.path;
+      ## This runner supports Docker (with a default Ubuntu image) and native
+      ## modes. In native mode, it contains a few default packages.
+      labels = [
+        "nix:docker://gitea-runner-nix"
+        "docker:docker://node:16-bullseye"
+        "native:host"
+      ];
+      hostPackages = with pkgs; [
+        attic-client
+        bash
+        git
+        nix
+        nodejs
+      ];
       settings = {
+        container = {
+          options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser --device=/dev/kvm";
+          # the default network that also respects our dns server settings
+          network = "host";
+          valid_volumes = [
+            "/nix"
+            "${storeDeps}/bin"
+            "${storeDeps}/etc/ssl"
+          ];
+        };
         log.level = "info";
         runner = {
           file = ".runner";
@@ -25,23 +92,165 @@
           fetch_interval = "2s";
         };
       };
-      ## This runner supports Docker (with a default Ubuntu image) and native
-      ## modes. In native mode, it contains a few default packages.
-      labels = [
-        "docker:docker://node:16-bullseye"
-        "native:host"
-      ];
-
-      hostPackages = with pkgs; [
-        bash
-        git
-        nix
-        nodejs
+    });
+  };
+
+  users = {
+    users.nixuser = {
+      group = "nixuser";
+      description = "Used for running nix ci jobs";
+      home = "/var/empty";
+      isSystemUser = true;
+    };
+    groups.nixuser = { };
+  };
+  virtualisation = {
+    ## For the Docker mode of the runner.
+    ## Podman seemed to get stuck on the checkout step
+    docker.enable = true;
+    containers.containersConf.settings = {
+      # podman (at least) seems to not work with systemd-resolved
+      containers.dns_servers = [
+        "8.8.8.8"
+        "8.8.4.4"
       ];
     };
   };
-
-  ## For the Docker mode of the runner.
-  virtualisation.docker.enable = true;
+  systemd.services =
+    {
+      gitea-runner-nix-image = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "docker.service" ];
+        requires = [ "docker.service" ];
+        path = [
+          pkgs.docker
+          pkgs.gnutar
+          pkgs.shadow
+          pkgs.getent
+        ];
+        # we also include etc here because the cleanup job also wants the nixuser to be present
+        script = ''
+          set -eux -o pipefail
+          mkdir -p etc/nix
+
+          # Create an unpriveleged user that we can use also without the run-as-user.sh script
+          touch etc/passwd etc/group
+          groupid=$(cut -d: -f3 < <(getent group nixuser))
+          userid=$(cut -d: -f3 < <(getent passwd nixuser))
+          groupadd --prefix $(pwd) --gid "$groupid" nixuser
+          emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
+          useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
+
+          cat <<NIX_CONFIG > etc/nix/nix.conf
+          accept-flake-config = true
+          experimental-features = nix-command flakes
+          NIX_CONFIG
+
+          cat <<NSSWITCH > etc/nsswitch.conf
+          passwd: files mymachines systemd
+          group: files mymachines systemd
+          shadow: files
+
+          hosts: files mymachines dns myhostname
+          networks: files
+
+          ethers: files
+          services: files
+          protocols: files
+          rpc: files
+          NSSWITCH
+
+          # list the content as it will be imported into the container
+          tar -cv . | tar -tvf -
+          tar -cv . | docker import - gitea-runner-nix
+        '';
+        serviceConfig = {
+          RuntimeDirectory = "gitea-runner-nix-image";
+          WorkingDirectory = "/run/gitea-runner-nix-image";
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+      };
+    }
+    // lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") numInstances) (
+      _:
+      let
+        requires = [ "gitea-runner-nix-image.service" ];
+      in
+      {
+        inherit requires;
+        after = requires;
+        # TODO: systemd confinement
+        serviceConfig = {
+          # Hardening (may overlap with DynamicUser=)
+          # The following options are only for optimizing output of systemd-analyze
+          AmbientCapabilities = "";
+          CapabilityBoundingSet = "";
+          # ProtectClock= adds DeviceAllow=char-rtc r
+          DeviceAllow = "";
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateMounts = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          UMask = "0066";
+          ProtectProc = "invisible";
+          SystemCallFilter = [
+            "~@clock"
+            "~@cpu-emulation"
+            "~@module"
+            "~@mount"
+            "~@obsolete"
+            "~@raw-io"
+            "~@reboot"
+            "~@swap"
+            # needed by go?
+            #"~@resources"
+            "~@privileged"
+            "~capset"
+            "~setdomainname"
+            "~sethostname"
+          ];
+          SupplementaryGroups = [ "docker" ];
+          RestrictAddressFamilies = [
+            "AF_INET"
+            "AF_INET6"
+            "AF_UNIX"
+            "AF_NETLINK"
+          ];
+
+          # Needs network access
+          PrivateNetwork = false;
+          # Cannot be true due to Node
+          MemoryDenyWriteExecute = false;
+
+          # The more restrictive "pid" option makes `nix` commands in CI emit
+          # "GC Warning: Couldn't read /proc/stat"
+          # You may want to set this to "pid" if not using `nix` commands
+          ProcSubset = "all";
+          # Coverage programs for compiled code such as `cargo-tarpaulin` disable
+          # ASLR (address space layout randomization) which requires the
+          # `personality` syscall
+          # You may want to set this to `true` if not using coverage tooling on
+          # compiled code
+          LockPersonality = false;
+
+          # Note that this has some interactions with the User setting; so you may
+          # want to consult the systemd docs if using both.
+          DynamicUser = true;
+        };
+      }
+    );
 }
@@ -110,4 +110,8 @@ in
       };
     };
   };
+
+  # needed to imperatively run forgejo commands e.g. to generate runner tokens.
+  # example: `sudo su - forgejo -c 'nix-shell -p forgejo --run "gitea actions generate-runner-token -C /var/lib/forgejo/custom"'`
+  users.users.forgejo.isNormalUser = true;
 }
@@ -18,4 +18,11 @@
       gateway = "2a00:51c0:13:1305::1";
     };
   };
+
+  nixos.module = {
+    imports = [
+      ../../../infra/common/proxmox-qemu-vm.nix
+      ../../../services/fediversity/attic
+    ];
+  };
 }
@@ -150,6 +150,19 @@
       "revision": "f33a4d26226c05d501b9d4d3e5e60a3a59991921",
       "url": "https://github.com/nixos/nixpkgs/archive/f33a4d26226c05d501b9d4d3e5e60a3a59991921.tar.gz",
       "hash": "1b6dm1sn0bdpcsmxna0zzspjaixa2dald08005fry5jrbjvwafdj"
+    },
+    "vars": {
+      "type": "Git",
+      "repository": {
+        "type": "GitHub",
+        "owner": "kiaragrouwstra",
+        "repo": "vars"
+      },
+      "branch": "templates",
+      "submodules": false,
+      "revision": "6ff942bf2b514edaa1022a92edb6552ac32a09d1",
+      "url": "https://github.com/kiaragrouwstra/vars/archive/6ff942bf2b514edaa1022a92edb6552ac32a09d1.tar.gz",
+      "hash": "1h1q3l1l1c1j4ak5lcj2yh85jwqww74ildiak2dkd4h1js9v6cvw"
     }
   },
   "version": 5

secrets/attic-ci-token.age (new binary file, not shown)

@@ -25,6 +25,7 @@ concatMapAttrs
 ## are able to decrypt them.
 
 {
+  attic-ci-token = [ forgejo-ci ];
   forgejo-database-password = [ vm02116 ];
   forgejo-email-password = [ vm02116 ];
   forgejo-runner-token = [ forgejo-ci ];
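A sketch of how the new age secret would typically be added with the agenix CLI (run from the secrets directory; the token value itself is not part of this diff):

$ agenix -e attic-ci-token.age    # opens $EDITOR; paste the Attic CI token
$ agenix -r                       # rekey so the forgejo-ci host key can decrypt it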
services/fediversity/attic/default.nix (new file, 347 lines):

{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib) mkIf mkMerge;
  sources = import ../../../npins;
in
{
  imports = with sources; [
    ./options.nix
    "${vars}/options.nix"
    "${vars}/backends/on-machine.nix"
  ];

  config = mkMerge [
    (mkIf
      (
        config.fediversity.garage.enable
        && config.fediversity.attic.s3AccessKeyFile != null
        && config.fediversity.attic.s3SecretKeyFile != null
      )
      {
        fediversity.garage = {
          ensureBuckets = {
            attic = {
              website = true;
              # TODO: these are too broad, after getting everything to work narrow it down to the domain we actually want
              corsRules = {
                enable = true;
                allowedHeaders = [ "*" ];
                allowedMethods = [ "GET" ];
                allowedOrigins = [ "*" ];
              };
            };
          };
          ensureKeys = {
            attic = {
              inherit (config.fediversity.attic) s3AccessKeyFile s3SecretKeyFile;
              ensureAccess = {
                attic = {
                  read = true;
                  write = true;
                  owner = true;
                };
              };
            };
          };
        };
      }
    )
    (mkIf config.fediversity.attic.enable {

      services.postgresql = {
        enable = true;
        authentication = lib.mkForce ''
          local all all trust
        '';
        ensureDatabases = [
          "atticd"
        ];
        ensureUsers = [
          {
            name = "atticd";
            ensureDBOwnership = true;
          }
        ];
      };

      # open up access. 80 is necessary if only for ACME
      networking.firewall.allowedTCPPorts = [
        80
        443
        8080
        9000
      ];

      # https://wiki.nixos.org/wiki/Nginx#TLS_reverse_proxy
      services.nginx = {
        enable = true;
        recommendedProxySettings = true;
        recommendedTlsSettings = true;
        virtualHosts."attic.${config.fediversity.domain}" = {
          enableACME = true;
          forceSSL = true;
          locations."/" = {
            proxyPass = "http://127.0.0.1:8080";
            proxyWebsockets = true; # needed if you need to use WebSocket
            extraConfig =
              # required when the target is also TLS server with multiple hosts
              "proxy_ssl_server_name on;"
              +
              # required when the server wants to use HTTP Authentication
              "proxy_pass_header Authorization;";
          };
        };
      };

      vars.settings.on-machine.enable = true;
      vars.generators."templates" = rec {
        dependencies = [ "attic" ];
        runtimeInputs = [
          pkgs.coreutils
          pkgs.gnused
        ];
        script = lib.concatStringsSep "\n" (
          lib.mapAttrsToList (template: _: ''
            cp "$templates/${template}" "$out/${template}"
            echo "filling placeholders in template ${template}..."
            ${lib.concatStringsSep "\n" (
              lib.mapAttrsToList (
                parent:
                { placeholder, ... }:
                ''
                  sed -i "s/${placeholder}/$(cat "$in/attic/${parent}")/g" "$out/${template}"
                  echo "- substituted ${parent}"
                ''
              ) config.vars.generators."attic".files
            )}
          '') files
        );

        files."attic.env" = {
          secret = true;
          template = pkgs.writeText "attic.env" ''
            ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="${config.vars.generators.attic.files.token.placeholder}"
            AWS_ACCESS_KEY_ID="$(cat ${config.fediversity.attic.s3AccessKeyFile})"
            AWS_SECRET_ACCESS_KEY="$(cat ${config.fediversity.attic.s3SecretKeyFile})"
          '';
        };
      };

      vars.generators.attic = {
        runtimeInputs = [
          pkgs.coreutils
          pkgs.openssl
        ];
        files.token.secret = true;
        script = ''
          openssl genrsa -traditional 4096 | base64 -w0 > "$out"/token
        '';
      };

      services.atticd = {
        enable = true;
        # one `monolithic` and any number of `api-server` nodes
        mode = "monolithic";

        environmentFile = config.vars.generators."templates".files."attic.env".path;

        # https://github.com/zhaofengli/attic/blob/main/server/src/config-template.toml
        settings = {
          # Socket address to listen on
          # listen = "[::]:8080";
          listen = "0.0.0.0:8080";
          # listen = "127.0.0.1:8080";

          # Allowed `Host` headers
          #
          # This _must_ be configured for production use. If unconfigured or the
          # list is empty, all `Host` headers are allowed.
          allowed-hosts = [ ];

          # The canonical API endpoint of this server
          #
          # This is the endpoint exposed to clients in `cache-config` responses.
          #
          # This _must_ be configured for production use. If not configured, the
          # API endpoint is synthesized from the client's `Host` header which may
          # be insecure.
          #
          # The API endpoint _must_ end with a slash (e.g., `https://domain.tld/attic/`
          # not `https://domain.tld/attic`).
          api-endpoint = "http://${config.fediversity.attic.domain}/";

          # Whether to soft-delete caches
          #
          # If this is enabled, caches are soft-deleted instead of actually
          # removed from the database. Note that soft-deleted caches cannot
          # have their names reused as long as the original database records
          # are there.
          #soft-delete-caches = false;

          # Whether to require fully uploading a NAR if it exists in the global cache.
          #
          # If set to false, simply knowing the NAR hash is enough for
          # an uploader to gain access to an existing NAR in the global
          # cache.
          #require-proof-of-possession = true;

          # Database connection
          database = {
            # Connection URL
            #
            # For production use it's recommended to use PostgreSQL.
            # url = "postgresql:///atticd:password@127.0.0.1:5432/atticd";
            url = "postgresql:///atticd?host=/run/postgresql";

            # Whether to enable sending on periodic heartbeat queries
            #
            # If enabled, a heartbeat query will be sent every minute
            #heartbeat = false;
          };

          # File storage configuration
          storage = {
            # Storage type
            #
            # Can be "local" or "s3".
            type = "s3";

            # ## Local storage

            # The directory to store all files under
            # path = "%storage_path%";

            # ## S3 Storage (set type to "s3" and uncomment below)

            # The AWS region
            region = "garage";

            # The name of the bucket
            bucket = "attic";

            # Custom S3 endpoint
            #
            # Set this if you are using an S3-compatible object storage (e.g., Minio).
            endpoint = config.fediversity.garage.api.url;

            # Credentials
            #
            # If unset, the credentials are read from the `AWS_ACCESS_KEY_ID` and
            # `AWS_SECRET_ACCESS_KEY` environment variables.
            # storage.credentials = {
            #   access_key_id = "";
            #   secret_access_key = "";
            # };
          };

          # Data chunking
          #
          # Warning: If you change any of the values here, it will be
          # difficult to reuse existing chunks for newly-uploaded NARs
          # since the cutpoints will be different. As a result, the
          # deduplication ratio will suffer for a while after the change.
          chunking = {
            # The minimum NAR size to trigger chunking
            #
            # If 0, chunking is disabled entirely for newly-uploaded NARs.
            # If 1, all NARs are chunked.
            nar-size-threshold = 65536; # chunk files that are 64 KiB or larger

            # The preferred minimum size of a chunk, in bytes
            min-size = 16384; # 16 KiB

            # The preferred average size of a chunk, in bytes
            avg-size = 65536; # 64 KiB

            # The preferred maximum size of a chunk, in bytes
            max-size = 262144; # 256 KiB
          };

          # Compression
          compression = {
            # Compression type
            #
            # Can be "none", "brotli", "zstd", or "xz"
            type = "zstd";

            # Compression level
            #level = 8;
          };

          # Garbage collection
          garbage-collection = {
            # The frequency to run garbage collection at
            #
            # By default it's 12 hours. You can use natural language
            # to specify the interval, like "1 day".
            #
            # If zero, automatic garbage collection is disabled, but
            # it can still be run manually with `atticd --mode garbage-collector-once`.
            interval = "12 hours";

            # Default retention period
            #
            # Zero (default) means time-based garbage-collection is
            # disabled by default. You can enable it on a per-cache basis.
            #default-retention-period = "6 months";
          };

          # jwt = {
          # WARNING: Changing _anything_ in this section will break any existing
          # tokens. If you need to regenerate them, ensure that you use the the
          # correct secret and include the `iss` and `aud` claims.

          # JWT `iss` claim
          #
          # Set this to the JWT issuer that you want to validate.
          # If this is set, all received JWTs will validate that the `iss` claim
          # matches this value.
          #token-bound-issuer = "some-issuer";

          # JWT `aud` claim
          #
          # Set this to the JWT audience(s) that you want to validate.
          # If this is set, all received JWTs will validate that the `aud` claim
          # contains at least one of these values.
          #token-bound-audiences = ["some-audience1", "some-audience2"];
          # };

          # jwt.signing = {
          # You must configure JWT signing and verification inside your TOML configuration by setting one of the following options in the [jwt.signing] block:
          # * token-rs256-pubkey-base64
          # * token-rs256-secret-base64
          # * token-hs256-secret-base64
          # or by setting one of the following environment variables:
          # * ATTIC_SERVER_TOKEN_RS256_PUBKEY_BASE64
          # * ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64
          # * ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64
          # Options will be tried in that same order (configuration options first, then environment options if none of the configuration options were set, starting with the respective RSA pubkey option, the RSA secret option, and finally the HMAC secret option). The first option that is found will be used.
          # If an RS256 pubkey (asymmetric RSA PEM PKCS1 public key) is provided, it will only be possible to verify received JWTs, and not sign new JWTs.
          # If an RS256 secret (asymmetric RSA PEM PKCS1 private key) is provided, it will be used for both signing new JWTs and verifying received JWTs.
          # If an HS256 secret (symmetric HMAC secret) is provided, it will be used for both signing new JWTs and verifying received JWTs.

          # JWT RS256 secret key
          #
          # Set this to the base64-encoded private half of an RSA PEM PKCS1 key.
          # TODO
          # You can also set it via the `ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64`
          # environment variable.
          # token-rs256-secret-base64 = "%token_rs256_secret_base64%";

          # JWT HS256 secret key
          #
          # Set this to the base64-encoded HMAC secret key.
          # You can also set it via the `ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64`
          # environment variable.
          #token-hs256-secret-base64 = "";
          # };
        };
      };
    })
  ];
}
services/fediversity/attic/options.nix (new file, 14 lines):

{ config, lib, ... }:

{
  options.fediversity.attic =
    (import ../sharedOptions.nix {
      inherit config lib;
      serviceName = "attic";
      serviceDocName = "Attic Nix Cache server";
    })
    //

    {
    };
}
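Once a machine running the Attic service module above is deployed, a client could create and use a cache roughly like this (a sketch; the server name, cache name, and admin token are illustrative, not taken from this diff):

$ attic login fediversity https://attic.fediversity.net <admin-token>
$ attic cache create demo
$ attic push demo ./result
$ attic use demo    # configures the local Nix installation to substitute from the cache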
@@ -13,6 +13,7 @@ in
     ./mastodon
     ./pixelfed
     ./peertube
+    ./attic
   ];
 
   options = {