Compare commits

17 commits, all dated 2025-07-11 16:19:35 +02:00:

95873fd960  increase numInstances to 5 (see the configuration sketch after this list)
414d786ee0  Revert "switch to podman"
            This reverts commit 60e7b841a9.
e757236e48  runs-on: nix
774d17aa45  switch to podman
af708d05ba  increase numInstances to 3
66828d41b1  add note on podman attempt
85c5305593  reconcile old/new runners
4631a2398c  explicitly use custom container in CI
9c75365609  explicitly allow running command to manually generate tokens from forgejo machine
            additionally serves to document the needed command, for future automation
            (see the command sketch after this list).
824c37c392  set up ci container from clan
            credit: https://discourse.nixos.org/t/gitea-nix-actions-runner-setup/35279
b0ca411ac2  try and recreate the container from icewind
            see: https://icewind.nl/entry/gitea-actions-nix/#using-nix-to-build-our-nix-image
            > Error: crun: cannot find `` in $PATH: No such file or directory: OCI runtime attempted to invoke a command that was not found
d566852471  runs-on: docker
21eaf8fea7  rm runner file
dc47095892  explicitly specify container image
45b841526d  add label for new runner
3d6730f6f4  try out existing nix container made for gitea actions
1a93775661  Switch all CI jobs to nixos label
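
Commit 9c75365609 documents a token-generation command without reproducing it in this view. As a hedged sketch only: Forgejo inherits Gitea's CLI subcommand for runner registration tokens, so the invocation on the forgejo machine presumably resembles the following, where the service user and config path are assumptions.

```
# sketch, not the repo's documented command: run on the Forgejo host;
# the service user (git) and the app.ini path are assumptions
sudo -u git forgejo actions generate-runner-token --config /etc/forgejo/app.ini
```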
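The `numInstances` bumps (commits 95873fd960 and af708d05ba) suggest the runner count is a single knob, but its definition is not part of the hunks below. What follows is a hypothetical sketch of what such a knob typically drives on NixOS; the instance names, forge URL, and token path are invented for illustration.

```
{ lib, ... }:
let
  numInstances = 5; # the value the commits bump from 3 to 5
in
{
  # register one Forgejo/Gitea actions runner per instance,
  # so CI can take that many jobs in parallel
  services.gitea-actions-runner.instances =
    lib.genAttrs (map (i: "runner-${toString i}") (lib.range 1 numInstances)) (name: {
      enable = true;
      url = "https://git.example.org"; # hypothetical forge URL
      tokenFile = "/var/lib/runner/${name}.env"; # hypothetical token file
      labels = [ "nix:host" ]; # matches the runs-on: nix label in the workflows
    });
}
```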
16 changed files with 61 additions and 98 deletions

@@ -4,7 +4,7 @@ on:
   workflow_dispatch: # allows manual triggering
   push:
     branches:
-      - main
+      # - main
 jobs:
   deploy:
@@ -13,7 +13,7 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4
-      - name: Set up SSH key for age secrets and SSH
+      - name: Set up SSH key to access age secrets
         run: |
           env
           mkdir -p ~/.ssh
@@ -21,4 +21,4 @@ jobs:
           chmod 600 ~/.ssh/id_ed25519
       - name: Deploy
-        run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'
+        run: nix-shell --run 'nixops4 apply default'

@@ -21,23 +21,17 @@ jobs:
       - uses: actions/checkout@v4
       - run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
-  check-mastodon:
-    runs-on: native
-    steps:
-      - uses: actions/checkout@v4
-      - run: nix build .#checks.x86_64-linux.test-mastodon-service -L
   check-peertube:
     runs-on: nix
     steps:
       - uses: actions/checkout@v4
-      - run: nix build .#checks.x86_64-linux.test-peertube-service -L
+      - run: nix-build services -A tests.peertube
   check-panel:
     runs-on: nix
     steps:
       - uses: actions/checkout@v4
-      - run: nix-build -A tests.panel
+      - run: nix-build panel -A tests
   check-deployment-basic:
     runs-on: nix

@@ -12,7 +12,6 @@ let
   inherit (pkgs) lib;
   inherit (import sources.flake-inputs) import-flake;
   inherit ((import-flake { src = ./.; }).inputs) nixops4;
-  panel = import ./panel { inherit sources system; };
   pre-commit-check =
     (import "${git-hooks}/nix" {
       inherit nixpkgs system;
@@ -72,7 +71,6 @@ in
   tests = {
     inherit pre-commit-check;
-    panel = panel.tests;
   };
   # re-export inputs so they can be overridden granularly

@@ -17,20 +17,15 @@
     ./infra/flake-part.nix
     ./keys/flake-part.nix
     ./secrets/flake-part.nix
-    ./services/tests/flake-part.nix
   ];
   perSystem =
     {
       pkgs,
       lib,
-      system,
       ...
     }:
     {
-      checks = {
-        panel = (import ./. { inherit sources system; }).tests.panel.basic;
-      };
       formatter = pkgs.nixfmt-rfc-style;
       pre-commit.settings.hooks =

@@ -1,13 +1,14 @@
 # Infra
-This directory contains the definition of [the VMs](../machines/machines.md) that host our
+This directory contains the definition of [the VMs](machines.md) that host our
 infrastructure.
 ## Provisioning VMs with an initial configuration
-> NOTE[Niols]: This is still very manual and clunky. Two things will happen:
-> 1. In the near future, I will improve the provisioning script to make this a bit less clunky.
-> 2. In the far future, NixOps4 will be able to communicate with Proxmox directly and everything will become much cleaner.
+NOTE[Niols]: This is very manual and clunky. Two things will happen. In the near
+future, I will improve the provisioning script to make this a bit less clunky.
+In the far future, NixOps4 will be able to communicate with Proxmox directly and
+everything will become much cleaner.
 1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
    above 100. For instance, `fedi117`.
@@ -24,7 +25,8 @@ infrastructure.
    Those files need to exist during provisioning, but their content matters only
    when updating the machines' configuration.
-   > FIXME: Remove this step by making the provisioning script not fail with the public key does not exist yet.
+   FIXME: Remove this step by making the provisioning script not fail with the
+   public key does not exist yet.
 3. Run the provisioning script:
    ```
@@ -42,7 +44,7 @@ infrastructure.
    ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
    ```
-   > FIXME: Make the provisioning script do that for us.
+   FIXME: Make the provisioning script do that for us.
 7. Regenerate the list of machines:
    ```
@@ -54,7 +56,7 @@ infrastructure.
    just enough for it to boot and be reachable. Go on to the next section to
    update the machine and put an actual configuration.
-> FIXME: Figure out why the full configuration isn't on the machine at this
-> point and fix it.
+FIXME: Figure out why the full configuration isn't on the machine at this
+point and fix it.
 ## Updating existing VM configurations

@@ -1,4 +1,15 @@
 { sources, ... }:
+let
+  # pulling this in manually over from module args resolves an infinite recursion.
+  # FIXME: instead untangle `//infra/flake-part.nix` and make it stop passing wild functions,
+  # by moving towards a portable-services-like pattern where some things are submodules.
+  # Right now those wild functions are for parameterising a bunch of things,
+  # and the modular way to do that would be options --
+  # obviously you can't use those for `imports`,
+  # so one way to decouple fixpoints is to isolate them into submodules.
+  # Therefore one approach would be to try to go down the call graph,
+  # and see where what's currently a function could be a `submodule` field of something else.
+  sources = import ../../npins;
+in
 {
   _class = "nixos";
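
The comment block added above sketches a direction without code. As a rough illustration only, with option names borrowed from `makeResourceModule`'s arguments and otherwise invented: the parameters a "wild function" currently carries would become an option typed as a submodule, whose own fixpoint evaluates independently of the outer module's.

```
{ lib, ... }:
{
  # hypothetical option: a submodule carries its own evaluation fixpoint,
  # so the outer module no longer needs a function passed through `imports`
  options.fediversity.vm = lib.mkOption {
    type = lib.types.submodule {
      options = {
        vmName = lib.mkOption { type = lib.types.str; };
        isTestVm = lib.mkOption {
          type = lib.types.bool;
          default = false;
        };
      };
    };
  };
}
```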

@@ -58,8 +58,6 @@ in
   users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
     # allow our panel vm access to the test machines
     keys.panel
-    # allow continuous deployment access
-    keys.cd
   ];
 };

@@ -27,6 +27,7 @@ let
   _module.args = {
     inherit
       inputs
+      sources
       keys
       secrets
       ;
@@ -36,10 +37,6 @@ let
       ./common/proxmox-qemu-vm.nix
     ];
-    nixos.specialArgs = {
-      inherit sources;
-    };
     imports =
       [
         ./common/resource.nix
@@ -69,33 +66,17 @@ let
     vmNames:
     { providers, ... }:
     {
-      # XXX: this type merge is for adding `specialArgs` to resource modules
-      options.resources = mkOption {
-        type =
-          with lib.types;
-          lazyAttrsOf (submoduleWith {
-            class = "nixops4Resource";
-            modules = [ ];
-            # TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs`
-            # once we're sure it's sane. leaving it here for better control during refactoring.
-            specialArgs = {
-              inherit sources;
-            };
-          });
-      };
-      config = {
-        providers.local = inputs.nixops4.modules.nixops4Provider.local;
-        resources = genAttrs vmNames (vmName: {
-          type = providers.local.exec;
-          imports = [
-            inputs.nixops4-nixos.modules.nixops4Resource.nixos
-            (makeResourceModule {
-              inherit vmName;
-              isTestVm = false;
-            })
-          ];
-        });
-      };
+      providers.local = inputs.nixops4.modules.nixops4Provider.local;
+      resources = genAttrs vmNames (vmName: {
+        type = providers.local.exec;
+        imports = [
+          inputs.nixops4-nixos.modules.nixops4Resource.nixos
+          (makeResourceModule {
+            inherit vmName;
+            isTestVm = false;
+          })
+        ];
+      });
     };
   makeDeployment' = vmName: makeDeployment [ vmName ];

@@ -1,17 +1,17 @@
 {
   config,
-  sources,
   ...
 }:
 let
   name = "panel";
+  sources = import ../../../npins;
 in
 {
   _class = "nixos";
   imports = [
     (import ../../../panel { }).module
-    "${sources.home-manager}/nixos"
+    (import "${sources.home-manager}/nixos")
   ];
   security.acme = {

@@ -73,7 +73,7 @@ in
       "${storeDeps}/etc/ssl"
     ];
   };
-  log.level = "trace";
+  log.level = "info";
   runner = {
     file = ".runner";
     # Take only 1 job at a time to avoid clashing NixOS tests, see #362

@@ -45,7 +45,7 @@ in
     '';
   };
-  module = ./nix/configuration.nix;
+  module = import ./nix/configuration.nix;
   tests = pkgs.callPackage ./nix/tests.nix { };
   # re-export inputs so they can be overridden granularly

services/default.nix (new file, +13)

@@ -0,0 +1,13 @@
+{
+  system ? builtins.currentSystem,
+  sources ? import ../npins,
+  pkgs ? import sources.nixpkgs { inherit system; },
+  ...
+}:
+{
+  tests = {
+    mastodon = pkgs.nixosTest ./tests/mastodon.nix;
+    pixelfed-garage = pkgs.nixosTest ./tests/pixelfed-garage.nix;
+    peertube = pkgs.nixosTest ./tests/peertube.nix;
+  };
+}
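
This file gives the tests a stable non-flake entry point. The CI hunk above already builds one attribute this way (`nix-build services -A tests.peertube`); the other two attributes follow the same pattern:

```
nix-build services -A tests.mastodon
nix-build services -A tests.pixelfed-garage
```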

@@ -49,7 +49,7 @@ in
   displayName = mkOption {
     type = types.str;
     description = "Name of the initial user, for humans";
-    default = config.fediversity.temp.initialUser.username;
+    default = config.fediversity.temp.initialUser.name;
   };
   email = mkOption {
     type = types.str;

@@ -1,14 +0,0 @@
-{ ... }:
-{
-  _class = "flake";
-  perSystem =
-    { pkgs, ... }:
-    {
-      checks = {
-        test-mastodon-service = pkgs.testers.runNixOSTest ./mastodon.nix;
-        test-pixelfed-garage-service = pkgs.testers.runNixOSTest ./pixelfed-garage.nix;
-        test-peertube-service = pkgs.testers.runNixOSTest ./peertube.nix;
-      };
-    };
-}

@@ -6,7 +6,7 @@
 { pkgs, ... }:
 let
-  inherit (pkgs) lib writeText;
+  lib = pkgs.lib;
   ## FIXME: this binding was not used, but maybe we want a side-effect or something?
   # rebuildableTest = import ./rebuildableTest.nix pkgs;
@@ -69,17 +69,9 @@ in
       expect
     ];
     environment.variables = {
-      AWS_ACCESS_KEY_ID = "$(cat ${config.fediversity.mastodon.s3AccessKeyFile})";
-      AWS_SECRET_ACCESS_KEY = "$(cat ${config.fediversity.mastodon.s3SecretKeyFile})";
+      AWS_ACCESS_KEY_ID = config.fediversity.garage.ensureKeys.mastodon.id;
+      AWS_SECRET_ACCESS_KEY = config.fediversity.garage.ensureKeys.mastodon.secret;
     };
-    services.mastodon.extraEnvFiles = [
-      # generate as: cd ${pkgs.mastodon}; IGNORE_ALREADY_SET_SECRETS=true RAILS_ENV=development ${pkgs.mastodon}/bin/rails db:encryption:init
-      (writeText "rest" ''
-        ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=naGoEzeyjUmwIlmgZZmGQDWJrlWud5eX
-        ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=A0tE1VJ7S3cjaOQ58mNkhrVFY7o5NKDB
-        ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=tGHhd5Os7hLxa8QTzWwjyVLrvsj5VsCw
-      '')
-    ];
   };
 };

@@ -113,7 +113,6 @@ let
       ${seleniumQuit}'';
-  dummyFile = pkgs.writeText "dummy" "dummy";
 in
 {
   name = "test-pixelfed-garage";
@@ -162,8 +161,8 @@ in
     ];
     environment.variables = {
       POST_MEDIA = ./fediversity.png;
-      AWS_ACCESS_KEY_ID = "$(cat ${config.fediversity.pixelfed.s3AccessKeyFile})";
-      AWS_SECRET_ACCESS_KEY = "$(cat ${config.fediversity.pixelfed.s3SecretKeyFile})";
+      AWS_ACCESS_KEY_ID = config.fediversity.garage.ensureKeys.pixelfed.id;
+      AWS_SECRET_ACCESS_KEY = config.fediversity.garage.ensureKeys.pixelfed.secret;
       ## without this we get frivolous errors in the logs
       MC_REGION = "garage";
     };
@@ -171,12 +170,6 @@ in
     users.users.selenium = {
       isNormalUser = true;
     };
-    fediversity.temp.initialUser = {
-      username = "dummy";
-      displayName = "dummy";
-      email = "dummy";
-      passwordFile = dummyFile;
-    };
   };
 };